gitignore

commit 4647a3ad43 (parent 0ec0ef10d8)
lehel, 2025-10-01 14:09:35 +02:00
GPG Key ID: 9C4F9D6111EE5CFA (no known key found for this signature in database)
6 changed files with 118 additions and 19 deletions

.github/workflows/ci.yml (new file, 37 lines)

@@ -0,0 +1,37 @@
+name: CI
+
+on:
+  push:
+    branches: [ main, master ]
+  pull_request:
+  workflow_dispatch:
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    env:
+      # Provide dummy/default env vars so code paths that read them won't fail.
+      OPENAI_API_KEY: dummy
+      # Default to local Ollama endpoint for tests (tests mock LLM so it's unused).
+      OPENAI_BASE_URL: http://localhost:11434/api/chat
+      OPENAI_MODEL: qwen3:latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version-file: go.mod
+          cache: true
+      - name: Go Vet
+        run: go vet ./...
+      - name: Run Tests
+        run: go test -count=1 ./...
+      - name: Build (sanity)
+        run: go build -v ./...
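
The dummy OPENAI_* values above are safe because, per the workflow's own comment, the tests mock the LLM rather than calling a real endpoint; llm.go (below) notes that an LLMClientAPI interface exists to allow exactly this. A minimal sketch of such a mock follows, assuming (since the diff only shows a fragment of the signature) that the interface carries a DisambiguateBestMatch method taking candidate strings and returning (string, error):

// llm_mock_sketch_test.go — a sketch, not the project's actual test code.
// LLMClientAPI's real method set is defined in llm.go and is not fully
// visible in this diff; the signature below is an assumption.
package main

import (
    "context"
    "testing"
)

// Assumed shape of the interface mentioned at the end of llm.go.
type LLMClientAPI interface {
    DisambiguateBestMatch(ctx context.Context, message string, candidates []string) (string, error)
}

type mockLLM struct{ reply string }

// The mock never touches HTTP, so the dummy OPENAI_* env vars are never read.
func (m *mockLLM) DisambiguateBestMatch(ctx context.Context, message string, candidates []string) (string, error) {
    return m.reply, nil
}

func TestMockNeverCallsNetwork(t *testing.T) {
    var llm LLMClientAPI = &mockLLM{reply: "checkup"}
    got, err := llm.DisambiguateBestMatch(context.Background(), "I need a checkup", []string{"checkup", "surgery"})
    if err != nil || got != "checkup" {
        t.Fatalf("got %q, err %v", got, err)
    }
}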

.gitignore (1 line changed)

@@ -1 +1,2 @@
 reasons.bleve
+visits.bleve
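
Both entries use the .bleve extension, the on-disk index-directory convention of the blevesearch/bleve Go library; that this project uses bleve is an inference from the names, not something the diff shows. A minimal sketch of how such an index directory appears in the working tree, which is what makes it worth git-ignoring:

// bleve_sketch.go — illustrative; assumes github.com/blevesearch/bleve/v2,
// inferred from the ".bleve" directory names above.
package main

import "github.com/blevesearch/bleve/v2"

func openVisitsIndex() (bleve.Index, error) {
    // bleve.New materializes an index as a directory named "visits.bleve"
    // in the working tree — exactly the path .gitignore now excludes.
    idx, err := bleve.New("visits.bleve", bleve.NewIndexMapping())
    if err == bleve.ErrorIndexPathExists {
        // Subsequent runs reopen the existing on-disk index.
        return bleve.Open("visits.bleve")
    }
    return idx, err
}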

llm.go (91 lines changed)

@@ -5,6 +5,7 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "io"
     "net/http"
     "strings"
     "text/template"
@@ -104,45 +105,105 @@ func (llm *LLMClient) DisambiguateBestMatch(ctx context.Context, message string,
     return visitReason, nil
 }
 
-// openAICompletion calls Ollama API with prompt and structure, returns structured result
+// openAICompletion now supports both Ollama (default local) and OpenRouter/OpenAI-compatible APIs without external branching.
+// It auto-detects by inspecting the BaseURL. If the URL contains "openrouter.ai" or "/v1/", it assumes OpenAI-style.
 func (llm *LLMClient) openAICompletion(ctx context.Context, prompt string, format map[string]interface{}) (string, error) {
     apiURL := llm.BaseURL
     if apiURL == "" {
         // Default to Ollama local chat endpoint
         apiURL = "http://localhost:11434/api/chat"
     }
-    logrus.WithFields(logrus.Fields{"api_url": apiURL, "prompt": prompt, "format": format}).Info("[LLM] openAICompletion POST")
-    body := map[string]interface{}{
-        "model":    llm.Model, // "qwen3:latest",
-        "messages": []map[string]string{{"role": "user", "content": prompt}},
-        "stream":   false,
-        "format":   format,
-    }
+    isOpenAIStyle := strings.Contains(apiURL, "openrouter.ai") || strings.Contains(apiURL, "/v1/")
+    // Build request body depending on style
+    var body map[string]interface{}
+    if isOpenAIStyle {
+        // OpenAI / OpenRouter style (chat.completions)
+        // Use response_format with JSON schema when provided.
+        responseFormat := map[string]interface{}{
+            "type": "json_schema",
+            "json_schema": map[string]interface{}{
+                "name":   "structured_output",
+                "schema": format,
+            },
+        }
+        body = map[string]interface{}{
+            "model":           llm.Model,
+            "messages":        []map[string]string{{"role": "user", "content": prompt}},
+            "response_format": responseFormat,
+        }
+    } else {
+        // Ollama structured output extension
+        body = map[string]interface{}{
+            "model":    llm.Model,
+            "messages": []map[string]string{{"role": "user", "content": prompt}},
+            "stream":   false,
+            "format":   format,
+        }
+    }
     jsonBody, _ := json.Marshal(body)
-    req, _ := http.NewRequestWithContext(ctx, "POST", apiURL, bytes.NewBuffer(jsonBody))
+    logrus.WithFields(logrus.Fields{"api_url": apiURL, "prompt": prompt, "is_openai_style": isOpenAIStyle}).Info("[LLM] completion POST")
+    req, _ := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, bytes.NewBuffer(jsonBody))
     if llm.APIKey != "" {
+        // OpenRouter expects: Authorization: Bearer sk-... or OR-... depending on key type
         req.Header.Set("Authorization", "Bearer "+llm.APIKey)
     }
     req.Header.Set("Content-Type", "application/json")
     client := &http.Client{}
     resp, err := client.Do(req)
     if err != nil {
-        logrus.WithError(err).Error("[LLM] openAICompletion error")
+        logrus.WithError(err).Error("[LLM] completion HTTP error")
         return "", err
     }
     defer resp.Body.Close()
-    var result struct {
-        Message struct {
-            Content string `json:"content"`
-        } `json:"message"`
-    }
-    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
-        logrus.WithError(err).Error("[LLM] openAICompletion decode error")
-        return "", err
-    }
-    if result.Message.Content == "" {
-        logrus.Warnf("[LLM] openAICompletion: no content returned %v body:[%v]", resp.Status, resp.Body)
-        return "", nil
-    }
-    logrus.WithField("content", result.Message.Content).Info("[LLM] openAICompletion: got content")
-    return result.Message.Content, nil
+    raw, err := io.ReadAll(resp.Body)
+    if err != nil {
+        return "", fmt.Errorf("failed reading response body: %w", err)
+    }
+    logrus.WithFields(logrus.Fields{"status": resp.StatusCode, "raw": string(raw)}).Debug("[LLM] completion raw response")
+    // Attempt Ollama format first (backwards compatible)
+    var ollama struct {
+        Message struct {
+            Content string `json:"content"`
+        } `json:"message"`
+    }
+    if err := json.Unmarshal(raw, &ollama); err == nil && ollama.Message.Content != "" {
+        logrus.WithField("content", ollama.Message.Content).Info("[LLM] completion (ollama) parsed")
+        return ollama.Message.Content, nil
+    }
+    // Attempt OpenAI / OpenRouter style
+    var openAI struct {
+        Choices []struct {
+            Message struct {
+                Role    string `json:"role"`
+                Content string `json:"content"`
+            } `json:"message"`
+        } `json:"choices"`
+        Error *struct {
+            Message string `json:"message"`
+            Type    string `json:"type"`
+        } `json:"error"`
+    }
+    if err := json.Unmarshal(raw, &openAI); err == nil {
+        if openAI.Error != nil {
+            return "", fmt.Errorf("provider error: %s (%s)", openAI.Error.Message, openAI.Error.Type)
+        }
+        if len(openAI.Choices) > 0 && openAI.Choices[0].Message.Content != "" {
+            content := openAI.Choices[0].Message.Content
+            logrus.WithField("content", content).Info("[LLM] completion (openai) parsed")
+            return content, nil
+        }
+    }
+    // If still nothing, return an error with a snippet of the raw body
+    return "", fmt.Errorf("unrecognized LLM response format: %.200s", string(raw))
 }
 
 // LLMClientAPI allows mocking LLMClient in other places
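
Taken together, the new code builds one of two request bodies and then tries both response shapes against the same raw bytes. Below is a standalone sketch of how the two bodies differ for the same schema; the "reason" schema fields are invented for illustration, while the wrapping mirrors the code above:

// request_shapes_sketch.go — illustrative; mirrors the two bodies that
// openAICompletion builds. The "reason" schema is an invented example.
package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // A JSON-schema map, as openAICompletion receives it via its format argument.
    format := map[string]interface{}{
        "type": "object",
        "properties": map[string]interface{}{
            "reason": map[string]interface{}{"type": "string"},
        },
        "required": []string{"reason"},
    }
    messages := []map[string]string{{"role": "user", "content": "..."}}

    // Ollama path: the schema rides along directly as "format".
    ollamaBody := map[string]interface{}{
        "model": "qwen3:latest", "messages": messages, "stream": false, "format": format,
    }
    // OpenAI/OpenRouter path: the same schema is wrapped in "response_format".
    openAIBody := map[string]interface{}{
        "model": "qwen3:latest", "messages": messages,
        "response_format": map[string]interface{}{
            "type": "json_schema",
            "json_schema": map[string]interface{}{"name": "structured_output", "schema": format},
        },
    }

    a, _ := json.MarshalIndent(ollamaBody, "", "  ")
    b, _ := json.MarshalIndent(openAIBody, "", "  ")
    fmt.Println(string(a))
    fmt.Println(string(b))
}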

3 binary files changed (contents not shown).