// Package main implements pierre, an AI-powered pull-request reviewer
// for Bitbucket, wiring the git and chat adapter packages together
// behind a kong-based CLI.
package main

import (
	"context"
	"fmt"
	"log"

	"bitbucket.bit.admin.ch/scm/~u80859501/pierre-bot/internal/chatter"
	"bitbucket.bit.admin.ch/scm/~u80859501/pierre-bot/internal/gitadapters"
	"bitbucket.bit.admin.ch/scm/~u80859501/pierre-bot/internal/pierre"

	"github.com/alecthomas/kong"
	kongyaml "github.com/alecthomas/kong-yaml"
)
// BitbucketConfig holds the connection settings and the pull-request
// coordinates for the Bitbucket instance under review. Field order matters:
// kong assigns positional arguments (arg:"") in declaration order.
type BitbucketConfig struct {
	// BaseURL is the root URL of the Bitbucket server (required).
	BaseURL string `help:"Bitbucket Base URL (e.g. https://bitbucket.example.com)" required:"" env:"BITBUCKET_URL"`
	// Token is the bearer token for API authentication. Not marked
	// required — presumably unauthenticated access is allowed; verify.
	Token string `help:"Bearer Token" env:"BITBUCKET_TOKEN"`
	// Positional arguments
	Project string `arg:"" help:"Project Key (e.g. PROJ)" env:"BITBUCKET_PROJECT"`
	Repo    string `arg:"" help:"Repository Slug" env:"BITBUCKET_REPO"`
	PRID    int    `arg:"" help:"Pull Request ID" name:"pr"`
}
// LLMConfig selects and configures the language-model backend.
type LLMConfig struct {
	// Provider chooses the backend; main accepts "ollama" or "gemini"
	// and exits on anything else.
	Provider string `help:"Provider for llm (ollama or gemini)" required:"" env:"LLM_PROVIDER"`
	// Endpoint is the server address passed to the ollama adapter only.
	Endpoint string `help:"Endpoint for provider (only for ollama)" env:"LLM_ENDPOINT"`
	// APIKey is passed to the gemini adapter.
	APIKey string `help:"APIKey for provider" env:"LLM_API_KEY"`
	// Model names the model handed to whichever adapter is built.
	Model string `help:"Model to use" env:"LLM_MODEL"`
}
// Config is the root CLI configuration. kong flattens the embedded
// sections so their flags surface as --bitbucket-* and --llm-*.
type Config struct {
	Bitbucket BitbucketConfig `embed:"" prefix:"bitbucket-"`
	LLM       LLMConfig       `embed:"" prefix:"llm-"`
	// Config lets the user point kong at an extra YAML config file.
	Config kong.ConfigFlag `help:"Path to a YAML config file"`
}
func main() {
|
|
cfg := &Config{}
|
|
|
|
// Parse flags, env vars, and config files
|
|
kong.Parse(cfg,
|
|
kong.Name("pierre"),
|
|
kong.Description("AI-powered Pull Request reviewer for Bitbucket"),
|
|
kong.UsageOnError(),
|
|
kong.Configuration(kongyaml.Loader, "config.yaml", "~/.config/pierre/config.yaml"),
|
|
)
|
|
|
|
// Initialize Bitbucket Adapter
|
|
bitbucket := gitadapters.NewBitbucket(cfg.Bitbucket.BaseURL, cfg.Bitbucket.Token)
|
|
|
|
// Fetch Diff using positional args
|
|
diff, err := bitbucket.GetDiff(cfg.Bitbucket.Project, cfg.Bitbucket.Repo, cfg.Bitbucket.PRID)
|
|
if err != nil {
|
|
log.Fatalf("Error fetching diff: %v", err)
|
|
}
|
|
|
|
// Initialize AI Adapter
|
|
|
|
var ai pierre.ChatAdapter
|
|
|
|
switch cfg.LLM.Provider {
|
|
case "gemini":
|
|
ai, err = chatter.NewGeminiAdapter(context.Background(), cfg.LLM.APIKey, cfg.LLM.Model)
|
|
case "ollama":
|
|
ai, err = chatter.NewOllamaAdapter(cfg.LLM.Endpoint, cfg.LLM.Model)
|
|
default:
|
|
log.Fatalf("%s is not a valid llm provider", cfg.LLM.Provider)
|
|
}
|
|
|
|
if err != nil {
|
|
log.Fatalf("Error initializing AI: %v", err)
|
|
}
|
|
|
|
// Run Logic
|
|
comments, err := pierre.JudgePR(context.Background(), ai, diff)
|
|
if err != nil {
|
|
log.Fatalf("Error judging PR: %v", err)
|
|
}
|
|
|
|
fmt.Printf("Analysis complete. Found %d issues.\n---\n", len(comments))
|
|
|
|
for _, c := range comments {
|
|
fmt.Printf("File: %s\nLine: %d\nMessage: %s\n%s\n",
|
|
c.File, c.Line, c.Message, "---")
|
|
}
|
|
}
|