20 changes: 16 additions & 4 deletions README.md
@@ -85,6 +85,7 @@ lamp <command> [flags]
- `--thinking-budget <tokens>`: Token budget for Claude's extended thinking mode
- `--ollama-host <url>`: Ollama server URL (default: http://localhost:11434)
- `--ollama-timeout <seconds>`: Ollama request timeout (default: 120)
- `--include-config`: Include Mattermost configuration in AI analysis (support-packet only)

#### Filtering Options
- `--search <term>`: Search term to filter logs
@@ -264,8 +265,11 @@ Support packet AI analysis:
export ANTHROPIC_API_KEY=YOUR_API_KEY
lamp support-packet mattermost_support_packet.zip --ai-analyze

# Use Ollama for local analysis
lamp support-packet mattermost_support_packet.zip --ai-analyze --llm-provider ollama
# Include Mattermost configuration for comprehensive analysis
lamp support-packet mattermost_support_packet.zip --ai-analyze --include-config

# Use Ollama for local analysis with configuration
lamp support-packet mattermost_support_packet.zip --ai-analyze --include-config --llm-provider ollama
```

## Output Formats
@@ -309,14 +313,21 @@ The parser supports both traditional Mattermost log formats and the newer JSON-f

## Support Packet Processing

The tool can extract and parse log files from Mattermost support packets. Support packets are ZIP files that contain server logs, configuration information, and diagnostic data. When using the `--support-packet` option, the tool will:
The tool can extract and parse log files from Mattermost support packets. Support packets are ZIP files that contain server logs, configuration information, and diagnostic data. When using the `support-packet` command, the tool will:

1. Extract log files from the ZIP archive
2. Parse each log file
3. Apply any specified filters (search term, level, user)
4. Display the combined results

This is particularly useful for analyzing logs from multi-node Mattermost deployments where each node's logs are included in the support packet.
For AI analysis, you can also include the Mattermost configuration:

- Use `--include-config` to extract and include `sanitized_config.json` in the AI analysis
- This provides additional context about the server's configuration, helping the AI identify misconfigurations
- Configuration data is only included when explicitly requested with the flag
- The AI can then correlate log issues with configuration settings for more comprehensive insights

This is particularly useful for analyzing logs from multi-node Mattermost deployments where each node's logs are included in the support packet, and when you need to understand how configuration affects the observed issues.
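
For illustration, the extraction step might look roughly like the sketch below — a minimal example built on Go's standard `archive/zip` package. The helper name `extractSanitizedConfig` is hypothetical and not necessarily what this codebase uses:

```go
package main

import (
	"archive/zip"
	"fmt"
	"io"
	"path"
)

// extractSanitizedConfig pulls sanitized_config.json out of a support
// packet ZIP archive. Illustrative sketch only.
func extractSanitizedConfig(packetPath string) (string, error) {
	r, err := zip.OpenReader(packetPath)
	if err != nil {
		return "", err
	}
	defer r.Close()

	for _, f := range r.File {
		// Packets may nest files under a top-level directory, so match
		// the base name rather than the full archive path.
		if path.Base(f.Name) != "sanitized_config.json" {
			continue
		}
		rc, err := f.Open()
		if err != nil {
			return "", err
		}
		defer rc.Close()
		data, err := io.ReadAll(rc)
		if err != nil {
			return "", err
		}
		return string(data), nil
	}
	return "", fmt.Errorf("sanitized_config.json not found in %s", packetPath)
}
```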

## Log Analysis

@@ -375,6 +386,7 @@ The `--ai-analyze` option uses AI to provide an intelligent analysis of your log
- Identifies potential root causes for errors
- Offers recommendations for resolution
- Gives context and insights that might not be obvious from statistical analysis
- For support packets: optionally includes Mattermost configuration data (`--include-config`) to identify misconfigurations and provide configuration-specific recommendations

**Supported LLM providers:**
- **Anthropic Claude** (default) - Get API key from [console.anthropic.com](https://console.anthropic.com/)
73 changes: 53 additions & 20 deletions analyzer_llm.go
@@ -57,7 +57,7 @@ type AnalysisPrompt struct {
}

// analyzeWithLLM routes the log analysis to the appropriate LLM provider
func analyzeWithLLM(logs []LogEntry, config LLMConfig) error {
func analyzeWithLLM(logs []LogEntry, config LLMConfig, configContent string) error {
Copilot AI May 22, 2025

[nitpick] To simplify function signatures and reduce duplication, consider adding ConfigContent as a field on LLMConfig instead of passing it as a separate parameter through all analyzeWith* functions.

Suggested change:
- func analyzeWithLLM(logs []LogEntry, config LLMConfig, configContent string) error {
+ func analyzeWithLLM(logs []LogEntry, config LLMConfig) error {

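A minimal sketch of that suggestion, with the existing `LLMConfig` fields abridged; `ConfigContent` is the hypothetical new field:

```go
// Sketch: LLMConfig carries the sanitized configuration itself, so the
// analyzeWith* functions keep their original two-parameter shape.
type LLMConfig struct {
	// ...existing fields (Provider, Model, APIKey, Problem, ThinkingBudget, ...)
	ConfigContent string // sanitized_config.json contents; empty when --include-config is unset
}

// Call sites then read config.ConfigContent instead of threading a
// separate configContent argument through every provider function.
func analyzeWithLLM(logs []LogEntry, config LLMConfig) error {
	prompt, err := prepareAnalysisPrompts(logs, config, config.ConfigContent)
	if err != nil {
		return err
	}
	_ = prompt // ...provider routing unchanged...
	return nil
}
```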
// If the API key is not provided and we're not using Ollama (which doesn't need a key),
// try to get it from the environment
if config.APIKey == "" && config.Provider != ProviderOllama {
@@ -71,13 +71,13 @@ func analyzeWithLLM(logs []LogEntry, config LLMConfig) error {
// Route to the appropriate provider
switch config.Provider {
case ProviderAnthropic:
return analyzeWithAnthropic(logs, config)
return analyzeWithAnthropic(logs, config, configContent)
case ProviderOpenAI:
return analyzeWithOpenAI(logs, config)
return analyzeWithOpenAI(logs, config, configContent)
case ProviderGemini:
return analyzeWithGemini(logs, config)
return analyzeWithGemini(logs, config, configContent)
case ProviderOllama:
return analyzeWithOllama(logs, config)
return analyzeWithOllama(logs, config, configContent)
default:
return fmt.Errorf("unsupported LLM provider: %s", config.Provider)
}
@@ -154,7 +154,7 @@ func formatLogsForAnalysis(logs []LogEntry) (string, int, bool) {
}

// prepareAnalysisPrompts generates system and user prompts for log analysis
func prepareAnalysisPrompts(logs []LogEntry, config LLMConfig) (AnalysisPrompt, error) {
func prepareAnalysisPrompts(logs []LogEntry, config LLMConfig, configContent string) (AnalysisPrompt, error) {
var prompt AnalysisPrompt

// If maxEntries is not set (0), use the default
@@ -177,16 +177,26 @@ func prepareAnalysisPrompts(logs []LogEntry, config LLMConfig) (AnalysisPrompt,
prompt.LogText = logText
prompt.HasDuplicates = hasDuplicates

// Create appropriate preface based on duplication
// Include sanitized_config.json if available from support packet
var configText string
if configContent != "" {
configText = configContent
logger.Debug("Including sanitized_config.json in AI analysis", "size", len(configText))
}

// Create appropriate preface based on duplication and config inclusion
entryDescription := fmt.Sprintf("%d Mattermost server log entries", len(logsToAnalyze))
if hasDuplicates {
entryDescription = fmt.Sprintf("%d unique Mattermost server log entries representing %d total log entries",
len(logsToAnalyze), totalEntries)
}
if configText != "" {
entryDescription += " and the sanitized Mattermost configuration"
}
prompt.Description = entryDescription

// Create the system prompt
prompt.SystemPrompt = `You are an expert log analyzer for Mattermost server logs.
systemPromptBase := `You are an expert log analyzer for Mattermost server logs.
Analyze the provided logs and provide a comprehensive report including:

1. A high-level summary of what's happening in the logs
@@ -202,6 +212,17 @@ Format your entire response in Markdown for easy reading and sharing. Use approp

Focus on actionable insights and be specific about what you find.`

if configText != "" {
systemPromptBase += `

When configuration data is provided, also consider:
- Configuration settings that might be related to the issues in the logs
- Misconfigurations that could be causing problems
- Recommended configuration changes based on the log patterns`
}

prompt.SystemPrompt = systemPromptBase

// Use a more concise prompt with thinking mode
if config.ThinkingBudget > 0 {
prompt.SystemPrompt = `You are an expert log analyzer for Mattermost server logs. Analyze these logs and identify issues, patterns, and solutions. Format your entire response in Markdown.`
@@ -210,27 +231,39 @@ Focus on actionable insights and be specific about what you find.`
if hasDuplicates {
prompt.SystemPrompt += ` Some log entries may be marked with repetition counts, indicating they appeared multiple times.`
}
// Add information about configuration if included
if configText != "" {
prompt.SystemPrompt += ` Configuration data is also provided - use it to identify misconfigurations and provide configuration recommendations.`
}
}

// Create the user prompt
var userPromptText string
if config.Problem != "" {
if config.ThinkingBudget > 0 {
prompt.UserPrompt = fmt.Sprintf("I'm investigating this problem: %s\n\nHere are %s to analyze:\n\n%s",
userPromptText = fmt.Sprintf("I'm investigating this problem: %s\n\nHere are %s to analyze:\n\n%s",
config.Problem, entryDescription, logText)
} else {
prompt.UserPrompt = fmt.Sprintf("I'm investigating this problem: %s\n\nHere are %s to analyze:\n\n%s\n\nPlease provide a detailed analysis of these logs focusing on the problem I described.",
userPromptText = fmt.Sprintf("I'm investigating this problem: %s\n\nHere are %s to analyze:\n\n%s\n\nPlease provide a detailed analysis of these logs focusing on the problem I described.",
config.Problem, entryDescription, logText)
}
} else {
if config.ThinkingBudget > 0 {
prompt.UserPrompt = fmt.Sprintf("Here are %s to analyze:\n\n%s",
userPromptText = fmt.Sprintf("Here are %s to analyze:\n\n%s",
entryDescription, logText)
} else {
prompt.UserPrompt = fmt.Sprintf("Here are %s to analyze:\n\n%s\n\nPlease provide a detailed analysis of these logs.",
userPromptText = fmt.Sprintf("Here are %s to analyze:\n\n%s\n\nPlease provide a detailed analysis of these logs.",
entryDescription, logText)
}
}

// Add configuration data if available
if configText != "" {
userPromptText += "\n\n## Mattermost Configuration (sanitized_config.json)\n\n```json\n" + configText + "\n```"
}

prompt.UserPrompt = userPromptText

return prompt, nil
}

@@ -318,7 +351,7 @@ type AnthropicError struct {
}

// analyzeWithAnthropic sends log data to Anthropic API for analysis
func analyzeWithAnthropic(logs []LogEntry, config LLMConfig) error {
func analyzeWithAnthropic(logs []LogEntry, config LLMConfig, configContent string) error {
// Get model info if available
modelName := config.Model
if modelName == "" {
@@ -336,7 +369,7 @@ func analyzeWithAnthropic(logs []LogEntry, config LLMConfig) error {
}

// Prepare prompts and logs
prompt, err := prepareAnalysisPrompts(logs, config)
prompt, err := prepareAnalysisPrompts(logs, config, configContent)
if err != nil {
return err
}
@@ -646,7 +679,7 @@ type OllamaResponse struct {
}

// analyzeWithGemini sends log data to Gemini API for analysis
func analyzeWithGemini(logs []LogEntry, config LLMConfig) error {
func analyzeWithGemini(logs []LogEntry, config LLMConfig, configContent string) error {
// Get model info if available
modelName := config.Model
if modelName == "" {
@@ -664,7 +697,7 @@ func analyzeWithGemini(logs []LogEntry, config LLMConfig) error {
}

// Prepare prompts and logs
prompt, err := prepareAnalysisPrompts(logs, config)
prompt, err := prepareAnalysisPrompts(logs, config, configContent)
if err != nil {
return err
}
@@ -766,7 +799,7 @@ func analyzeWithGemini(logs []LogEntry, config LLMConfig) error {
}

// analyzeWithOllama sends log data to a local Ollama instance for analysis
func analyzeWithOllama(logs []LogEntry, config LLMConfig) error {
func analyzeWithOllama(logs []LogEntry, config LLMConfig, configContent string) error {
// Get model info if available
modelName := config.Model
if modelName == "" {
@@ -784,7 +817,7 @@ func analyzeWithOllama(logs []LogEntry, config LLMConfig) error {
}

// Prepare prompts and logs
prompt, err := prepareAnalysisPrompts(logs, config)
prompt, err := prepareAnalysisPrompts(logs, config, configContent)
if err != nil {
return err
}
@@ -875,7 +908,7 @@ func analyzeWithOllama(logs []LogEntry, config LLMConfig) error {
}

// analyzeWithOpenAI sends log data to OpenAI API for analysis
func analyzeWithOpenAI(logs []LogEntry, config LLMConfig) error {
func analyzeWithOpenAI(logs []LogEntry, config LLMConfig, configContent string) error {
// Get model info if available
modelName := config.Model
if modelName == "" {
@@ -893,7 +926,7 @@ func analyzeWithOpenAI(logs []LogEntry, config LLMConfig) error {
}

// Prepare prompts and logs
prompt, err := prepareAnalysisPrompts(logs, config)
prompt, err := prepareAnalysisPrompts(logs, config, configContent)
if err != nil {
return err
}
20 changes: 11 additions & 9 deletions main.go
@@ -39,6 +39,7 @@ var (
interactive bool
verbose bool
quiet bool
includeConfig bool
verboseAnalysis bool
rawOutput bool

@@ -78,7 +79,7 @@ var fileCmd = &cobra.Command{
return fmt.Errorf("error parsing log file: %v", err)
}

return processLogs(logs)
return processLogs(logs, "")
} else {
// Multiple files mode
var allLogs []LogEntry
@@ -128,7 +129,7 @@ var fileCmd = &cobra.Command{
})

logger.Info("Finished processing files", "total_files", len(args), "total_entries", len(allLogs))
return processLogs(allLogs)
return processLogs(allLogs, "")
}
},
}
@@ -154,7 +155,7 @@ var notificationCmd = &cobra.Command{
return fmt.Errorf("error parsing notification log file: %v", err)
}

return processLogs(logs)
return processLogs(logs, "")
},
}

@@ -174,16 +175,16 @@ var supportPacketCmd = &cobra.Command{
return fmt.Errorf("support packet '%s' does not exist", packetPath)
}

logs, err := parseSupportPacket(packetPath, searchTerm, regexSearch, levelFilter, userFilter, startTime, endTime)
result, err := parseSupportPacket(packetPath, searchTerm, regexSearch, levelFilter, userFilter, startTime, endTime)
if err != nil {
return fmt.Errorf("error parsing support packet: %v", err)
}

if verbose {
fmt.Printf("Debug: processing %d log entries\n", len(logs))
logger.Debug("Debug: processing log entries", "count", len(result.Logs))
}

return processLogs(logs)
return processLogs(result.Logs, result.ConfigContent)
},
}
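
The corresponding support_packet.go changes are not shown in this diff, but the call site above implies `parseSupportPacket` now returns a struct along these lines (the type name and field layout are assumptions inferred from `result.Logs` and `result.ConfigContent`):

```go
// SupportPacketResult is a hypothetical name for the result type implied
// by the call sites in supportPacketCmd.
type SupportPacketResult struct {
	Logs          []LogEntry // parsed entries from every log file in the packet
	ConfigContent string     // sanitized_config.json contents; "" unless --include-config was set
}
```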

@@ -297,6 +298,7 @@ func init() {
cmd.Flags().BoolVar(&interactive, "interactive", false, "Launch interactive TUI mode")
cmd.Flags().BoolVar(&verbose, "verbose", false, "Enable verbose output logging")
cmd.Flags().BoolVar(&quiet, "quiet", false, "Only output errors")
cmd.Flags().BoolVar(&includeConfig, "include-config", false, "Include configuration in AI analysis (support-packet only)")
cmd.Flags().BoolVar(&verboseAnalysis, "verbose-analysis", false, "Show detailed analysis with all sections")
cmd.Flags().BoolVar(&rawOutput, "raw", false, "Output raw log entries instead of analysis (old default behavior)")

@@ -342,7 +344,7 @@ func init() {
})

// Add boolean flag completion
for _, flag := range []string{"json", "analyze", "ai-analyze", "trim", "interactive", "verbose", "quiet", "verbose-analysis", "raw"} {
for _, flag := range []string{"json", "analyze", "ai-analyze", "trim", "interactive", "verbose", "quiet", "include-config", "verbose-analysis", "raw"} {
registerFlagCompletion(cmd, flag, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return []string{"true", "false"}, cobra.ShellCompDirectiveNoFileComp
})
@@ -375,7 +377,7 @@ func contains(slice []string, str string) bool {
}

// processLogs handles the common log processing logic
func processLogs(logs []LogEntry) error {
func processLogs(logs []LogEntry, configContent string) error {
// Note: Filtering is already applied during log parsing in parseLogFile
// so by the time logs reach this function, they're already filtered

@@ -501,7 +503,7 @@ func processLogs(logs []LogEntry) error {
ThinkingBudget: thinkingBudget,
}

if err := analyzeWithLLM(logs, config); err != nil {
if err := analyzeWithLLM(logs, config, configContent); err != nil {
return fmt.Errorf("error during LLM analysis: %v", err)
}
case analyze: