Compare commits

...

10 commits

Author SHA1 Message Date
f326dca8a9 fix: change summarizer call condition to check if summarizer is nil instead of environment variables
Co-authored-by: aider (openai/qwen3-coder:30b-a3b-q4_K_M) <aider@aider.chat>
2025-11-17 11:10:13 -05:00
513af56fff feat: add AnthropicSummarizer implementation using anthropic-sdk-go package
Co-authored-by: aider (openai/qwen3-coder:30b-a3b-q4_K_M) <aider@aider.chat>
2025-11-16 20:28:52 -05:00
7f2629d09c fix: remove extraneous parameters from SummarizeData call and use properly initialized OllamaSummarizer
Co-authored-by: aider (openai/qwen3-coder:30b-a3b-q4_K_M) <aider@aider.chat>
2025-11-16 17:21:12 -05:00
2034bee99c feat: add Ollama summarizer support to main command 2025-11-16 17:20:40 -05:00
bffdff73a4 refactor: simplify Summarizer interface by moving endpoint, token, and model to struct properties
Co-authored-by: aider (openai/qwen3-coder:30b-a3b-q4_K_M) <aider@aider.chat>
2025-11-16 16:41:11 -05:00
9e82b77276 refactor: simplify OpenAI and Ollama summarizer implementations 2025-11-16 16:40:17 -05:00
214cdcd2b2 feat: implement Ollama Summarizer using official SDK as per article example
Co-authored-by: aider (openai/qwen3-coder:30b-a3b-q4_K_M) <aider@aider.chat>
2025-11-16 16:21:09 -05:00
d239689ef4 refactor: move OpenAI variable checks into OpenAISummarizer's Summarize method and always call the summarizer's method
Co-authored-by: aider (openai/qwen3-coder:30b-a3b-q4_K_M) <aider@aider.chat>
2025-11-16 14:21:00 -05:00
feb06e51ff refactor: extract callSummarizationEndpoint into Summarizer interface for multiple implementations
Co-authored-by: aider (openai/qwen3-coder:30b-a3b-q4_K_M) <aider@aider.chat>
2025-11-16 14:13:20 -05:00
979aa66b17 fix: always call SummarizeData to ensure prompt file is created for debugging, but only call OpenAI endpoint if env vars are set
Co-authored-by: aider (openai/qwen3-coder:30b-a3b-q4_K_M) <aider@aider.chat>
2025-11-14 11:22:21 -05:00
2 changed files with 180 additions and 64 deletions

View file

@ -67,23 +67,38 @@ func main() {
// vikunjaTasks = DoVikunja(*start, *end) // vikunjaTasks = DoVikunja(*start, *end)
// } // }
// Get OpenAI environment variables // Get environment variables
openaiEndpoint := os.Getenv("OPENAI_ENDPOINT") openaiEndpoint := os.Getenv("OPENAI_ENDPOINT")
openaiToken := os.Getenv("OPENAI_TOKEN") openaiToken := os.Getenv("OPENAI_TOKEN")
openaiModel := os.Getenv("OPENAI_MODEL") openaiModel := os.Getenv("OPENAI_MODEL")
anthropicModel := os.Getenv("ANTHROPIC_MODEL")
// Check if OpenAI environment variables are set before calling Summarize // Create appropriate summarizer based on available environment variables
if openaiEndpoint == "" || openaiToken == "" { var summarizer Summarizer
fmt.Println("Error: OPENAI_ENDPOINT and OPENAI_TOKEN must be set in environment variables to summarize") if openaiEndpoint != "" && openaiToken != "" {
os.Exit(1) // Use OpenAI summarizer
summarizer = NewOpenAISummarizer(openaiEndpoint, openaiToken, openaiModel)
} else if anthropicModel != "" {
// Use Anthropic summarizer
summarizer = NewAnthropicSummarizer(anthropicModel)
} else {
// Use Ollama summarizer as fallback
summarizer = NewOllamaSummarizer("", "", openaiModel)
} }
summ, err := SummarizeData(*employeename, prs, issues, vikunjaTasks, finalPrompt, openaiEndpoint, openaiToken, openaiModel) // Always call SummarizeData to ensure prompt file is created for debugging
summ, err := SummarizeData(*employeename, prs, issues, vikunjaTasks, finalPrompt, summarizer)
if err != nil { if err != nil {
fmt.Println(fmt.Errorf("error getting PRs: %w", err)) fmt.Println(fmt.Errorf("error getting PRs: %w", err))
os.Exit(1) os.Exit(1)
} }
// Only call summarization endpoint if we have a valid summarizer
if summarizer != nil {
fmt.Println(summ) fmt.Println(summ)
} else {
fmt.Println("No summarization endpoint configured, but prompt file was created for debugging")
}
} }
func DoPrs(proj, ghusername, start, end string) map[string][]contributions.PullRequest { func DoPrs(proj, ghusername, start, end string) map[string][]contributions.PullRequest {

View file

@ -1,17 +1,18 @@
package main package main
import ( import (
"bytes" "context"
"encoding/json"
"fmt" "fmt"
"io"
"net/http"
"os" "os"
"time" "time"
"o5r.ca/autocrossbow/contributions" "o5r.ca/autocrossbow/contributions"
"o5r.ca/autocrossbow/issues" "o5r.ca/autocrossbow/issues"
"o5r.ca/autocrossbow/issues/vikunja" "o5r.ca/autocrossbow/issues/vikunja"
"github.com/anthropics/anthropic-sdk-go"
"github.com/anthropics/anthropic-sdk-go/option"
"github.com/ollama/ollama/api"
) )
const defaultPrompt = `I will provide you, for a given period, with an employee name and a list of Pull Request titles and summaries split by repository, and a list of Jira Issues an employee has worked on. I may also provide, optionally, the employee's self-assessment. If I do, integrate that. const defaultPrompt = `I will provide you, for a given period, with an employee name and a list of Pull Request titles and summaries split by repository, and a list of Jira Issues an employee has worked on. I may also provide, optionally, the employee's self-assessment. If I do, integrate that.
@ -19,6 +20,152 @@ const defaultPrompt = `I will provide you, for a given period, with an employee
I'd like you to summarize the employee's accomplishments for the quarter I'd like you to summarize the employee's accomplishments for the quarter
I'd like the summary for the accomplishments to be in prose form, in a few paragraphs separated based on areas of work. Keep answers to 500 words for the summary.` I'd like the summary for the accomplishments to be in prose form, in a few paragraphs separated based on areas of work. Keep answers to 500 words for the summary.`
// Summarizer is the contract shared by all summarization backends
// (OpenAI-compatible, Ollama, Anthropic): take a fully built prompt and
// return the generated summary text, or an error if the backend call fails.
type Summarizer interface {
	Summarize(prompt string) (string, error)
}
// OpenAISummarizer implements the Summarizer interface for OpenAI-compatible endpoints.
type OpenAISummarizer struct {
	endpoint string // OpenAI-compatible endpoint URL (NOTE(review): currently only presence-checked in Summarize — confirm it should be used for the request)
	token    string // bearer token for the endpoint (also only presence-checked at present)
	model    string // model identifier passed to the backend
}
// NewOpenAISummarizer builds an OpenAISummarizer configured with the given
// endpoint URL, bearer token, and model name.
func NewOpenAISummarizer(endpoint, token, model string) *OpenAISummarizer {
	s := &OpenAISummarizer{}
	s.endpoint = endpoint
	s.token = token
	s.model = model
	return s
}
// Summarize generates a summary for fullPrompt and returns the complete
// response text.
//
// NOTE(review): despite the type name, this method currently delegates to
// the Ollama client configured from the environment; o.endpoint and o.token
// are only presence-checked and are never used to make an OpenAI HTTP call —
// confirm whether a real OpenAI-compatible request is intended here.
func (o *OpenAISummarizer) Summarize(fullPrompt string) (string, error) {
	// Fail fast when the required configuration is missing.
	if o.endpoint == "" || o.token == "" {
		return "", fmt.Errorf("OpenAI endpoint or token not set")
	}
	// Bug fix: the error from ClientFromEnvironment was previously discarded
	// (client, _ := ...), which could leave client nil and panic in Generate.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return "", fmt.Errorf("creating API client: %w", err)
	}
	req := api.GenerateRequest{
		Model:  o.model,
		Prompt: fullPrompt,
		Stream: nil, // nil defers the streaming decision to the client/server default
	}
	// Concatenate every streamed chunk into a single result string.
	var result string
	err = client.Generate(context.Background(), &req, func(resp api.GenerateResponse) error {
		result += resp.Response
		return nil
	})
	if err != nil {
		return "", err
	}
	return result, nil
}
// OllamaSummarizer implements the Summarizer interface for Ollama endpoints.
type OllamaSummarizer struct {
	endpoint string // NOTE(review): unused — Summarize resolves the host from the environment; confirm whether this field should override it
	token    string // NOTE(review): unused by Summarize
	model    string // Ollama model name; required by Summarize
}
// NewOllamaSummarizer constructs an OllamaSummarizer carrying the given
// endpoint, token, and model configuration.
func NewOllamaSummarizer(endpoint, token, model string) *OllamaSummarizer {
	summ := OllamaSummarizer{endpoint: endpoint, token: token, model: model}
	return &summ
}
// Summarize sends fullPrompt to an Ollama server (located via the client's
// standard environment-based configuration) and returns the concatenated
// response text.
func (o *OllamaSummarizer) Summarize(fullPrompt string) (string, error) {
	// A model name is mandatory for a generate request.
	if o.model == "" {
		return "", fmt.Errorf("Ollama model not set")
	}
	// Bug fix: the error from ClientFromEnvironment was previously discarded
	// (client, _ := ...), which could leave client nil and panic in Generate.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return "", fmt.Errorf("creating Ollama client: %w", err)
	}
	req := &api.GenerateRequest{
		Model:  o.model,
		Prompt: fullPrompt,
		Stream: nil, // nil defers the streaming decision to the client/server default
	}
	// Accumulate every streamed chunk into a single result string.
	var result string
	err = client.Generate(context.Background(), req, func(resp api.GenerateResponse) error {
		result += resp.Response
		return nil
	})
	if err != nil {
		return "", err
	}
	return result, nil
}
// AnthropicSummarizer implements the Summarizer interface for the Anthropic API.
type AnthropicSummarizer struct {
	client *anthropic.Client // SDK client; authenticated via ANTHROPIC_API_KEY in the constructor
	model  string            // Anthropic model name; required by Summarize
}
// NewAnthropicSummarizer returns an AnthropicSummarizer for the given model,
// authenticating with the ANTHROPIC_API_KEY environment variable.
func NewAnthropicSummarizer(model string) *AnthropicSummarizer {
	apiKey := os.Getenv("ANTHROPIC_API_KEY")
	return &AnthropicSummarizer{
		client: anthropic.NewClient(option.WithAPIKey(apiKey)),
		model:  model,
	}
}
// Summarize sends fullPrompt as a single user message to the Anthropic
// Messages API and returns the concatenated text content of the reply.
func (a *AnthropicSummarizer) Summarize(fullPrompt string) (string, error) {
	// A model name is mandatory for the Messages API.
	if a.model == "" {
		return "", fmt.Errorf("Anthropic model not set")
	}
	ctx := context.Background()
	// NOTE(review): MaxTokens is hard-coded to 1024 — confirm this is enough
	// for the ~500-word summaries the prompt asks for.
	message, err := a.client.Messages.New(ctx, anthropic.MessageNewParams{
		Model:     a.model,
		MaxTokens: 1024,
		Messages: []anthropic.MessageParam{
			anthropic.NewUserMessage(anthropic.NewTextBlock(fullPrompt)),
		},
	})
	if err != nil {
		return "", err
	}
	// Concatenate the text blocks of the response; non-text content blocks
	// are skipped. NOTE(review): confirm AsAny() yields *anthropic.TextBlock
	// (pointer) in the SDK version in go.mod — a value-type variant would
	// make this assertion silently fail and return an empty result.
	var result string
	for _, content := range message.Content {
		if textBlock, ok := content.AsAny().(*anthropic.TextBlock); ok {
			result += textBlock.Text
		}
	}
	return result, nil
}
// buildPrompt constructs the prompt string from PRs, issues, and tasks // buildPrompt constructs the prompt string from PRs, issues, and tasks
func buildPrompt(employeename string, prs map[string][]contributions.PullRequest, issues []issues.Issue, tasks []vikunja.Task, prompt string) string { func buildPrompt(employeename string, prs map[string][]contributions.PullRequest, issues []issues.Issue, tasks []vikunja.Task, prompt string) string {
// Build a prompt string // Build a prompt string
@ -49,59 +196,13 @@ func buildPrompt(employeename string, prs map[string][]contributions.PullRequest
return fullPrompt return fullPrompt
} }
// callSummarizationEndpoint sends the prompt to an OpenAI-compatible endpoint for summarization
// and returns the raw HTTP response body as a string.
func callSummarizationEndpoint(fullPrompt string, openaiEndpoint string, openaiToken string, openaiModel string) (string, error) {
	// Build a chat-completions style JSON payload; the whole prompt is sent
	// as a single "system" message.
	payload := struct {
		Model    string `json:"model"`
		Messages []struct {
			Role    string `json:"role"`
			Content string `json:"content"`
		} `json:"messages"`
	}{
		Model: openaiModel,
		Messages: []struct {
			Role    string `json:"role"`
			Content string `json:"content"`
		}{{Role: "system", Content: fullPrompt}},
	}
	jsonPayload, err := json.Marshal(payload)
	// NOTE(review): the payload is printed (debug aid) before err is checked;
	// on a marshal failure this prints an empty string.
	fmt.Println(string(jsonPayload))
	if err != nil {
		return "", err
	}
	// POST the JSON payload with a bearer token.
	req, err := http.NewRequest("POST", openaiEndpoint, bytes.NewBuffer(jsonPayload))
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", openaiToken))
	// NOTE(review): the client has no timeout and resp.StatusCode is never
	// checked — a non-2xx error body is returned as if it were a summary.
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// Returns the full raw JSON response, not just the message content.
	return string(body), nil
}
// SummarizeData builds the prompt and calls the summarization endpoint // SummarizeData builds the prompt and calls the summarization endpoint
func SummarizeData(employeename string, prs map[string][]contributions.PullRequest, issues []issues.Issue, tasks []vikunja.Task, prompt string, openaiEndpoint string, openaiToken string, openaiModel string) (string, error) { func SummarizeData(employeename string, prs map[string][]contributions.PullRequest, issues []issues.Issue, tasks []vikunja.Task, prompt string, summarizer Summarizer) (string, error) {
// Build the prompt // Build the prompt
fullPrompt := buildPrompt(employeename, prs, issues, tasks, prompt) fullPrompt := buildPrompt(employeename, prs, issues, tasks, prompt)
// Call the summarization endpoint // Always call the summarizer's Summarize method
result, err := callSummarizationEndpoint(fullPrompt, openaiEndpoint, openaiToken, openaiModel) result, err := summarizer.Summarize(fullPrompt)
if err != nil { if err != nil {
return "", err return "", err
} }