name: Local Assistant
version: 1.0.0
schema: v1

models:
  - name: Qwen3-coder
    provider: ollama
    apiBase: http://10.1.109.10:11434
    model: hoangquan456/qwen3-nothink:1.7b
    roles:
      - edit
      - apply
      - autocomplete
      - chat
  - name: Nomic Embed
    provider: ollama
    apiBase: http://10.1.109.10:11434
    model: nomic-embed-text:latest
    roles:
      - embed
  - name: Autodetect
    provider: ollama
    model: AUTODETECT

system_message: "You are an AI assistant running locally on an 8GB GPU. Keep responses concise and efficient."

context_providers:
  - name: "file" # Current file context
    params:
      max_chars: 2000 # Prevent overload
  - name: "terminal" # Shell command output
    params:
      max_lines: 50
  - name: "diff" # Git/SVN changes
    params:
      max_chars: 1000
  - name: "github" # PRs/issues (needs auth)
    params:
      repo: "your/repo" # Optional filtering
  - name: "search" # Codebase search
    params:
      max_results: 3
  - name: "url" # Webpage context
    params:
      max_chars: 1500
  - name: "open" # Recently opened files
    params:
      max_files: 5
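
# Sanity check (a sketch, assuming the Ollama server at 10.1.109.10:11434 is
# reachable and you can run the ollama CLI against it, e.g. on that host or
# with OLLAMA_HOST=http://10.1.109.10:11434 set):
#
#   ollama pull hoangquan456/qwen3-nothink:1.7b
#   ollama pull nomic-embed-text:latest
#   curl http://10.1.109.10:11434/api/tags   # lists models the server has locally
#
# If /api/tags does not show both models, the chat/edit/autocomplete roles and
# the embed role configured above will have nothing to resolve against.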