# vetrag/Makefile
# Makefile for running the Vet Clinic Chat Assistant locally with Ollama
.PHONY: run ollama-start ollama-stop ollama-pull ollama-status curl-embed curl-translate curl-chat
# Start Ollama server in the background (no-op check: if one is already
# running on the default port, the second instance will fail on bind).
ollama-start:
	ollama serve &
	@echo "Ollama server started."
# Stop Ollama server. `|| true` keeps this target from failing when no
# server process exists (best-effort teardown).
ollama-stop:
	pkill -f "ollama serve" || true
	@echo "Ollama server stopped."
# Pull the chat model (default: qwen3:latest).
# Override per invocation: make ollama-pull OPENAI_MODEL=llama3
# OPENAI_MODEL is defined later in the file; recipes expand at run time,
# so the reference here is safe.
ollama-pull:
	ollama pull $(OPENAI_MODEL)
# List locally available Ollama models (quick health/status check).
ollama-status:
	ollama list
# Ollama host & models (override as needed, e.g. make OLLAMA_HOST=http://host:11434)
OLLAMA_HOST ?= http://localhost:11434
# Primary chat / reasoning model (already using OPENAI_MODEL var for compatibility)
OPENAI_MODEL ?= qwen3:latest
# Optional separate embedding model
OLLAMA_EMBED_MODEL ?= all-minilm
# Translation prompt (mirrors config.yaml translate_prompt). Can override: make curl-translate PROMPT="..." TRANSLATE_PROMPT="..."
# Intentionally a recursive (?=) variable: $(PROMPT) inside it is expanded
# late, at recipe-execution time, so the per-invocation PROMPT is picked up.
TRANSLATE_PROMPT ?= Translate the following veterinary-related sentence to English. Input: '$(PROMPT)'. Return ONLY the English translation, no extra text, no markdown, no quotes. If already English, return as is.
# Database configuration (override via: make run DB_PASSWORD=secret DB_NAME=other)
DB_HOST ?= localhost
DB_PORT ?= 5432
DB_USER ?= postgres
DB_PASSWORD ?= postgres
# NOTE(review): "ledger-balance-service" looks copy-pasted from another
# project for this vet-clinic app — confirm the intended database name.
DB_NAME ?= ledger-balance-service
DB_SSLMODE ?= disable
# Derived env export snippet for DB: prefixed onto commands so psql/go pick
# up the standard PG* environment variables.
db_env = PGHOST=$(DB_HOST) PGPORT=$(DB_PORT) PGUSER=$(DB_USER) PGPASSWORD=$(DB_PASSWORD) PGDATABASE=$(DB_NAME) PGSSLMODE=$(DB_SSLMODE)
# Run the Go server (assumes Ollama is already running) with DB env vars.
# Uses $(OLLAMA_HOST)/$(OPENAI_MODEL) so both can be overridden per invocation;
# defaults expand to the same values that were previously hard-coded.
run:
	$(db_env) OPENAI_API_KEY=ollama OPENAI_BASE_URL=$(OLLAMA_HOST)/api/chat OPENAI_MODEL=$(OPENAI_MODEL) go run .
# Run without pulling the model first (faster if already present).
# Currently identical to `run`; kept as a separate name for workflow clarity.
run-fast:
	$(db_env) OPENAI_API_KEY=ollama OPENAI_BASE_URL=$(OLLAMA_HOST)/api/chat OPENAI_MODEL=$(OPENAI_MODEL) go run .
# Quick psql shell against the configured DB (requires psql installed).
# `|| true` keeps make from reporting failure when the shell exits non-zero.
psql:
	$(db_env) psql || true
# Print the DSN that main.go will assemble (password masked).
print-dsn:
	@echo postgres://$(DB_USER):******@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?sslmode=$(DB_SSLMODE)
# Test targets
.PHONY: test test-verbose test-race test-coverage test-coverage-html
# Run standard tests (non-zero exit on failure, as CI expects)
test:
	go test ./...
# Run tests with verbose output
test-verbose:
	go test -v ./...
# Run tests with the data-race detector enabled
test-race:
	go test -race ./...
# Run tests with coverage reporting; writes coverage.out then prints a
# per-function summary.
test-coverage:
	go test -coverprofile=coverage.out ./...
	go tool cover -func=coverage.out
# Open an HTML coverage report (depends on test-coverage for coverage.out)
test-coverage-html: test-coverage
	go tool cover -html=coverage.out
# --- Utility curl targets ---
# Example: make curl-embed PROMPT="warm up"
# PROMPT is JSON-encoded via `jq -Rs` (same approach as curl-translate /
# curl-chat) so quotes or newlines in the text cannot break the payload.
curl-embed:
	@test -n "$(PROMPT)" || { echo "Usage: make curl-embed PROMPT='text' [OLLAMA_EMBED_MODEL=model]"; exit 1; }
	@echo "[curl-embed] model=$(OLLAMA_EMBED_MODEL) prompt='$(PROMPT)'"; \
	PROMPT_JSON=$$(printf '%s' "$(PROMPT)" | jq -Rs .); \
	curl -sS -X POST "$(OLLAMA_HOST)/api/embeddings" \
		-H 'Content-Type: application/json' \
		-d '{"model":"$(OLLAMA_EMBED_MODEL)","prompt":'$$PROMPT_JSON'}' | jq . || true
# Example: make curl-translate PROMPT="A kutyám nem eszik"
# Sends TRANSLATE_PROMPT (which interpolates PROMPT) to the chat endpoint.
# `jq -Rs` JSON-encodes the full prompt; `$$` passes a literal $ to the shell.
curl-translate:
	@test -n "$(PROMPT)" || { echo "Usage: make curl-translate PROMPT='sentence to translate'"; exit 1; }
	@echo "[curl-translate] model=$(OPENAI_MODEL)"; \
	PROMPT_JSON=$$(printf '%s' "$(TRANSLATE_PROMPT)" | jq -Rs .); \
	curl -sS -X POST "$(OLLAMA_HOST)/api/chat" \
		-H 'Content-Type: application/json' \
		-d '{"model":"$(OPENAI_MODEL)","messages":[{"role":"user","content":'$$PROMPT_JSON'}],"stream":false}' | jq -r '.message.content' || true
# Generic chat invocation (raw user PROMPT, no translation wrapper).
# Example: make curl-chat PROMPT="List 3 dog breeds"
curl-chat:
	@test -n "$(PROMPT)" || { echo "Usage: make curl-chat PROMPT='your message'"; exit 1; }
	@echo "[curl-chat] model=$(OPENAI_MODEL)"; \
	PROMPT_JSON=$$(printf '%s' "$(PROMPT)" | jq -Rs .); \
	curl -sS -X POST "$(OLLAMA_HOST)/api/chat" \
		-H 'Content-Type: application/json' \
		-d '{"model":"$(OPENAI_MODEL)","messages":[{"role":"user","content":'$$PROMPT_JSON'}],"stream":false}' | jq -r '.message.content' || true