====== projects:zibaldone:vscode ======

  
  
===== ollama =====

models:
- https://ollama.com/hoangquan456/qwen3-nothink
  
install ollama

<code>
sudo systemctl start ollama
  
ollama pull hoangquan456/qwen3-nothink:1.7b
ollama pull nomic-embed-text:latest
ollama pull llama3.2

# mimo and an alias
ollama pull hf.co/jedisct1/MiMo-7B-RL-GGUF:Q4_K_M
ollama cp hf.co/jedisct1/MiMo-7B-RL-GGUF:Q4_K_M mimo

# vision
ollama pull ZimaBlueAI/MiniCPM-o-2_6
</code>
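
quick check that the models are in place (a minimal smoke test, assuming ollama on its default port 11434):

<code>
# list pulled models
ollama list

# one-shot prompt, no interactive session
ollama run hoangquan456/qwen3-nothink:1.7b "say hello"

# same over the HTTP API
curl http://localhost:11434/api/generate \
  -d '{"model": "hoangquan456/qwen3-nothink:1.7b", "prompt": "say hello", "stream": false}'
</code>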
  
===== void =====

<code>
void --install-extension ms-python.python@2025.6.1
</code>
 + 
use pyright instead of pylance as the python language server (sketch below)
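
a possible way to switch, assuming void keeps the vscode CLI flags and that ms-pyright.pyright is available in the extension registry void uses:

<code>
# drop pylance (if installed) and use the standalone pyright extension
void --uninstall-extension ms-python.vscode-pylance
void --install-extension ms-pyright.pyright
</code>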

===== kilo code =====

disable github copilot: CTRL+SHIFT+p and search for "Chat: hide AI ..."


qdrant vector store
<code | docker-compose.yaml>
services:
  qdrant:
    image: qdrant/qdrant
    ports:
      - '6333:6333'
    volumes:
      - qdrant_storage:/qdrant/storage
    restart: unless-stopped
volumes:
  qdrant_storage:
</code>
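
bring it up and check it answers (default REST port 6333):

<code>
docker compose up -d

# a fresh store returns an empty collections list
curl http://localhost:6333/collections
</code>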
===== continue.dev =====

disable github copilot: CTRL+SHIFT+p and search for "Chat: hide AI ..."
  
install continue.dev extension

<code>
schema: v1
models:
  - name: Qwen3-coder
    provider: ollama
    apiBase: http://10.1.109.10:11434
    model: hoangquan456/qwen3-nothink:1.7b
    roles:
      - edit
      - apply
      - autocomplete
      - chat
  - name: Nomic Embed
    provider: ollama
    apiBase: http://10.1.109.10:11434
    model: nomic-embed-text:latest
    roles:
      - embed
  - name: Autodetect
    provider: ollama
    model: AUTODETECT
system_message: "You are an AI assistant running locally on an 8GB GPU. Keep responses concise and efficient."
context_providers:
  - name: "file"  # Current file context
    params:
      max_chars: 2000  # Prevent overload
  - name: "terminal"  # Shell command output
    params:
      max_lines: 50
  - name: "diff"  # Git/svn changes
    params:
      max_chars: 1000
  - name: "github"  # PRs/issues (needs auth)
    params:
      repo: "your/repo"  # Optional filtering
  - name: "search"  # Codebase search
    params:
      max_results: 3
  - name: "url"  # Webpage context
    params:
      max_chars: 1500
  - name: "open"  # Recently opened files
    params:
      max_files: 5
</code>
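
continue reads this from ~/.continue/config.yaml; in chat, context providers are invoked with @ (e.g. @terminal, @diff). to confirm the remote ollama endpoint in apiBase is reachable:

<code>
# lists the models served by the remote instance
curl http://10.1.109.10:11434/api/tags
</code>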
  