Inf. 00's TIL

Things I've learned.

Testing openclaw locally

2026-02-04 · openclaw
# start docker on mac (via colima)
colima start
docker info
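
If docker info still fails, checking the colima VM first usually narrows it down; the resource flags below are optional and just a sketch.

# check the vm, optionally restart it with more resources
colima status
colima stop
colima start --cpu 4 --memory 8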


# docker-compose symlink
mkdir -p ~/.docker/cli-plugins
ln -sf "$(which docker-compose)" ~/.docker/cli-plugins/docker-compose
docker compose version
brew install bash
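
The symlink matters because the docker CLI discovers compose v2 through its cli-plugins directories (including ~/.docker/cli-plugins); a quick check that it resolves:

# confirm the plugin symlink points at a real docker-compose binary
ls -l ~/.docker/cli-plugins/docker-compose
readlink ~/.docker/cli-plugins/docker-compose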


# testing in docker
# in the end, gave up on running a local llm inside docker
docker compose up -d openclaw-gateway
docker compose logs -f --tail=200 openclaw-gateway
docker compose up openclaw-gateway
docker compose run --rm openclaw-cli dashboard --no-open
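
After giving up on the docker route, tearing the stack down avoids a second gateway keeping connections open (relevant to the 409 below).

# stop everything before switching to the local run
docker compose ps
docker compose down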


# using a cloned repo instead
node dist/index.js onboard
node dist/index.js
node dist/index.js tui
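
Rough sketch of getting to dist/index.js from a fresh clone; the repo URL placeholder and npm scripts are assumptions, not taken from the openclaw docs.

# build from a clone (build script is a guess)
git clone <openclaw repo url> openclaw && cd openclaw
npm install
npm run build
node dist/index.js onboard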


# telegram error: turned out openclaw was still running in docker
For the Telegram 409 Conflict:
This usually means another instance is already connected. Check for running processes:
ps aux | grep clawdbot
ps aux | grep -i "openclaw\|dist/index.js" | grep -v grep
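
Since the conflict here was the docker gateway still polling telegram, stopping it (or any stray local run) clears the 409.

# stop whichever instance is still holding the telegram connection
docker compose stop openclaw-gateway
pkill -f "dist/index.js"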

# reference
config: https://docs.openclaw.ai/gateway/configuration-examples
openclaw with vllm: https://unsloth.ai/docs/models/glm-4.7-flash
browser doc: https://docs.openclaw.ai/tools/browser

  "models": {
    "mode": "merge",
    "providers": {
      "lmstudio": {
        "baseUrl": "http://127.0.0.1:1234/v1",
        "apiKey": "lm-studio",
        "api": "openai-responses",
        "models": [
          {
            "id": "nvidia/nemotron-3-nano",
            "name": "lmstudio",
            "reasoning": false,
            "input": [
              "text"
            ],
            "cost": {
              "input": 0,
              "output": 0,
              "cacheRead": 0,
              "cacheWrite": 0
            },
            "contextWindow": 16000,
            "maxTokens": 16000
          }
        ]
      }
    }
  },
  "agents": {
    "defaults": {
      "model": {
        "primary": "lmstudio/nvidia/nemotron-3-nano"
      },
      "models": {
        "lmstudio/nvidia/nemotron-3-nano": {
          "alias": "Nemotron 3 Nano"
        }
      },
      "workspace": "/Users/user/.openclaw/workspace",
      "compaction": {
        "mode": "safeguard"
      },
      "maxConcurrent": 4,
      "subagents": {
        "maxConcurrent": 8
      }
    }
  },
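
Before wiring this in, a quick check that lm studio is actually serving the model on that baseUrl; lm studio exposes the usual openai-compatible endpoints, so /v1/models should list what's loaded (sketch).

# sanity-check the lm studio endpoint from the config above
curl -s http://127.0.0.1:1234/v1/models
curl -s http://127.0.0.1:1234/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "nvidia/nemotron-3-nano", "messages": [{"role": "user", "content": "hi"}]}'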