// log-analyzer/src/ai/analyzer.rs

use crate::logcat::logcat_analyzer::LogcatAnalyzer;
use crate::model::{
    ai::{AIAnalysisRequest, AIInsight, AnomalyType},
    log_entry::{LogEntry, LogLevel},
};

/// Sends summarized logcat data to a local Ollama instance for AI-assisted analysis,
/// falling back to rule-based insights when Ollama is unreachable.
pub struct LocalAiLogAnalyzer {
    ollama_host: String,
    model_name: String,
    client: reqwest::Client,
}

impl LocalAiLogAnalyzer {
    /// Creates an analyzer, defaulting to `http://localhost:11434` and the
    /// `llama3.1` model when no overrides are supplied.
    pub fn new(ollama_host: Option<String>, model_name: Option<String>) -> Self {
        Self {
            ollama_host: ollama_host.unwrap_or_else(|| "http://localhost:11434".to_string()),
            model_name: model_name.unwrap_or_else(|| "llama3.1".to_string()),
            client: reqwest::Client::new(),
        }
    }
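
    // A minimal usage sketch (assumptions, not taken from this file: an async runtime
    // such as Tokio is available, and `logcat_analyzer` / `entries` were produced
    // elsewhere in the crate):
    //
    //     let ai = LocalAiLogAnalyzer::new(None, None);
    //     let entry_refs: Vec<&LogEntry> = entries.iter().collect();
    //     let insights = ai.analyze_logs(&logcat_analyzer, &entry_refs).await?;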

    /// Analyzes the given log entries. Uses the configured Ollama model when the
    /// server is reachable; otherwise falls back to offline heuristic insights.
    pub async fn analyze_logs(
        &self,
        analyzer: &LogcatAnalyzer,
        entries: &[&LogEntry],
    ) -> Result<Vec<AIInsight>, Box<dyn std::error::Error>> {
        if !self.check_ollama_availability().await {
            return Ok(self.generate_offline_insights(analyzer, entries));
        }
        let analysis_request = self.prepare_analysis_request(analyzer, entries);
        let insights = self.call_ollama_api(analysis_request).await?;
        Ok(insights)
    }

    /// Builds the structured analysis request sent to the model: a summary line,
    /// representative error and crash entries, detected anomalies, source code
    /// context, and any code quality issues surfaced by the source analyzer.
    fn prepare_analysis_request(
        &self,
        analyzer: &LogcatAnalyzer,
        entries: &[&LogEntry],
    ) -> AIAnalysisRequest {
        let log_summary = format!(
            "Total entries: {}, Errors: {}, Crashes: {}, With source code: {}",
            entries.len(),
            entries
                .iter()
                .filter(|e| matches!(e.level, LogLevel::Error | LogLevel::Fatal))
                .count(),
            analyzer.find_crashes().len(),
            entries
                .iter()
                .filter(|e| e.source_location.is_some())
                .count()
        );

        let error_patterns: Vec<String> = entries
            .iter()
            .filter(|e| matches!(e.level, LogLevel::Error | LogLevel::Fatal))
            .take(10)
            .map(|e| {
                let source_info = if let Some(source) = &e.source_location {
                    format!(
                        " [{}:{}]",
                        source.file_path,
                        source.line_number.unwrap_or(0)
                    )
                } else {
                    String::new()
                };
                format!("[{:?}] {}: {}{}", e.level, e.tag, e.message, source_info)
            })
            .collect();

        let crash_entries: Vec<String> = analyzer
            .find_crashes()
            .iter()
            .take(5)
            .map(|e| {
                let source_info = if let Some(source) = &e.source_location {
                    format!(
                        " [{}:{}]",
                        source.file_path,
                        source.line_number.unwrap_or(0)
                    )
                } else {
                    String::new()
                };
                format!("[{:?}] {}: {}{}", e.level, e.tag, e.message, source_info)
            })
            .collect();

        let anomalies: Vec<String> = analyzer
            .detect_anomalies()
            .iter()
            .take(5)
            .map(|a| format!("{:?}: {}", a.anomaly_type, a.description))
            .collect();

        // Add source code context for critical entries.
        let source_code_context: Vec<String> = entries
            .iter()
            .filter(|e| {
                e.source_location.is_some() && matches!(e.level, LogLevel::Error | LogLevel::Fatal)
            })
            .take(5)
            .filter_map(|e| {
                e.source_location.as_ref().map(|source| {
                    format!(
                        "File: {}, Method: {}, Context:\n{}",
                        source.file_path,
                        source.method_name.as_deref().unwrap_or("unknown"),
                        source.code_context.join("\n")
                    )
                })
            })
            .collect();

        // Add code quality issues if a source analyzer is available.
        let code_quality_issues: Vec<String> =
            if let Some(source_analyzer) = &analyzer.source_analyzer {
                source_analyzer
                    .analyze_code_quality(entries)
                    .iter()
                    .map(|issue| {
                        format!(
                            "{:?}: {} ({})",
                            issue.issue_type, issue.description, issue.severity
                        )
                    })
                    .collect()
            } else {
                Vec::new()
            };

        AIAnalysisRequest {
            log_summary,
            error_patterns,
            crash_entries,
            anomalies,
            source_code_context,
            code_quality_issues,
            context: "Android application logcat analysis with source code correlation".to_string(),
        }
    }

    async fn check_ollama_availability(&self) -> bool {
        match self
            .client
            .get(&format!("{}/api/tags", self.ollama_host))
            .send()
            .await
        {
            Ok(response) => response.status().is_success(),
            Err(_) => false,
        }
    }
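
    // A sketch of the same availability probe with a short timeout, so an unreachable
    // host does not stall the whole analysis. The 2-second value is an assumption, not
    // taken from the original code; `RequestBuilder::timeout` is reqwest's per-request
    // timeout.
    //
    //     self.client
    //         .get(format!("{}/api/tags", self.ollama_host))
    //         .timeout(std::time::Duration::from_secs(2))
    //         .send()
    //         .await
    //         .map(|r| r.status().is_success())
    //         .unwrap_or(false)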

    async fn call_ollama_api(
        &self,
        request: AIAnalysisRequest,
    ) -> Result<Vec<AIInsight>, Box<dyn std::error::Error>> {
        let prompt = format!(
            r#"You are an expert Android developer and log analysis specialist. Analyze the following Android logcat data WITH source code context and provide structured insights.
Log Summary: {}
Error Patterns: {:?}
Crash Entries: {:?}
Detected Anomalies: {:?}
Source Code Context: {:?}
Code Quality Issues: {:?}
IMPORTANT: You have access to source code context. Use this to provide deeper analysis about:
1. Root causes of errors based on the actual code
2. Specific code improvements and fixes
3. Code quality issues and best practices violations
4. Performance bottlenecks visible in the source
5. Security vulnerabilities in the code
Please analyze this data and provide insights in the following JSON format (respond with ONLY valid JSON, no additional text):
[{{
"category": "error_analysis",
"severity": "high",
"description": "Brief description of the issue with source code reference",
"recommendation": "Specific code changes and improvements",
"confidence": 0.8,
"source_file": "filename.java",
"line_number": 123
}}]
Categories can be: error_analysis, performance, crashes, security, code_quality, best_practices
Severity levels: low, medium, high, critical
Confidence should be between 0.0 and 1.0
Focus on actionable code-level insights that help developers fix specific issues in their Android application source code."#,
            request.log_summary,
            request.error_patterns,
            request.crash_entries,
            request.anomalies,
            request.source_code_context,
            request.code_quality_issues
        );

        let payload = serde_json::json!({
            "model": self.model_name,
            "prompt": prompt,
            "stream": false,
            "options": {
                "temperature": 0.3,
                "top_p": 0.9,
                // "num_predict" is Ollama's option for limiting output length
                // (the OpenAI-style "max_tokens" is not recognized here).
                "num_predict": 2000
            }
        });

        let response = self
            .client
            .post(&format!("{}/api/generate", self.ollama_host))
            .header("Content-Type", "application/json")
            .json(&payload)
            .send()
            .await?;

        if !response.status().is_success() {
            return Err(format!("Ollama API error: {}", response.status()).into());
        }

        // The non-streaming /api/generate reply carries the generated text in a
        // top-level "response" field.
        let response_json: serde_json::Value = response.json().await?;
        if let Some(response_text) = response_json.get("response").and_then(|v| v.as_str()) {
            self.parse_ollama_response(response_text)
        } else {
            Err("Invalid response format from Ollama".into())
        }
    }

    fn parse_ollama_response(
        &self,
        response: &str,
    ) -> Result<Vec<AIInsight>, Box<dyn std::error::Error>> {
        // Try to extract the JSON array from the response, tolerating any prose
        // the model wraps around it.
        let json_start = response.find('[').unwrap_or(0);
        let json_end = response.rfind(']').map(|i| i + 1).unwrap_or(response.len());
        let json_str = &response[json_start..json_end];

        match serde_json::from_str::<Vec<AIInsight>>(json_str) {
            Ok(insights) => Ok(insights),
            Err(e) => {
                println!("Warning: Failed to parse AI response as JSON: {}", e);
                println!("Raw response: {}", response);
                // Fall back to a single generic insight so the caller still gets a result.
                Ok(vec![AIInsight {
                    category: "ai_analysis".to_string(),
                    severity: "medium".to_string(),
                    description: "AI analysis completed with parsing issues".to_string(),
                    recommendation: "Review the log analysis manually for best results".to_string(),
                    confidence: 0.5,
                }])
            }
        }
    }
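
    // A slightly more defensive variant of the extraction above (a sketch, not from the
    // original code): slice only when a '['..']' pair appears in order, so a stray ']'
    // before the first '[' cannot panic the slice; otherwise hand the raw text to serde
    // and let the fallback fire.
    //
    //     let json_str = match (response.find('['), response.rfind(']')) {
    //         (Some(start), Some(end)) if start < end => &response[start..=end],
    //         _ => response,
    //     };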

    /// Heuristic, fully offline fallback used when Ollama is unavailable.
    fn generate_offline_insights(
        &self,
        analyzer: &LogcatAnalyzer,
        entries: &[&LogEntry],
    ) -> Vec<AIInsight> {
        let mut insights = Vec::new();

        // Flag a high error rate (more than 10% of entries at Error level).
        let error_count = entries
            .iter()
            .filter(|e| matches!(e.level, LogLevel::Error))
            .count();
        let total_count = entries.len();
        if total_count > 0 && error_count as f32 / total_count as f32 > 0.1 {
            insights.push(AIInsight {
                category: "error_analysis".to_string(),
                severity: "high".to_string(),
                description: format!(
                    "High error rate detected: {}/{} entries are errors",
                    error_count, total_count
                ),
                recommendation: "Review error patterns and implement error handling".to_string(),
                confidence: 0.9,
            });
        }

        // Flag crashes.
        let crashes = analyzer.find_crashes();
        if !crashes.is_empty() {
            insights.push(AIInsight {
                category: "crashes".to_string(),
                severity: "critical".to_string(),
                description: format!("Found {} potential crashes or exceptions", crashes.len()),
                recommendation: "Investigate crash logs and implement proper exception handling"
                    .to_string(),
                confidence: 0.95,
            });
        }

        // Map detected anomalies to insights, scaling severity with the anomaly score.
        let anomalies = analyzer.detect_anomalies();
        for anomaly in anomalies {
            let severity = match anomaly.severity {
                s if s > 0.8 => "high",
                s if s > 0.5 => "medium",
                _ => "low",
            };
            insights.push(AIInsight {
                category: "anomaly_detection".to_string(),
                severity: severity.to_string(),
                description: anomaly.description,
                recommendation: self.get_anomaly_recommendation(&anomaly.anomaly_type),
                confidence: anomaly.severity,
            });
        }

        insights
    }

    fn get_anomaly_recommendation(&self, anomaly_type: &AnomalyType) -> String {
        match anomaly_type {
            AnomalyType::FrequencySpike => {
                "Monitor system resources and optimize logging frequency".to_string()
            }
            AnomalyType::UnusualErrorPattern => {
                "Investigate new error patterns and update error handling".to_string()
            }
            AnomalyType::MemoryLeak => {
                "Profile memory usage and fix potential memory leaks".to_string()
            }
            AnomalyType::PerformanceDegradation => {
                "Analyze performance bottlenecks and optimize critical paths".to_string()
            }
            AnomalyType::CrashLoop => {
                "Fix underlying crash causes to prevent restart loops".to_string()
            }
            AnomalyType::SuspiciousActivity => {
                "Review security implications and implement additional monitoring".to_string()
            }
        }
    }
}
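
// A minimal test sketch for the JSON-extraction fallback in `parse_ollama_response`.
// Assumptions: `AIInsight` derives `serde::Deserialize` (already required by the
// `serde_json::from_str` call above) and its fields are visible from this module
// (they are constructed by name above).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_json_array_embedded_in_prose() {
        let analyzer = LocalAiLogAnalyzer::new(None, None);
        let raw = r#"Here are the insights:
[{"category":"error_analysis","severity":"high","description":"NPE in MainActivity.onCreate","recommendation":"Add a null check before use","confidence":0.8}]
Done."#;
        let insights = analyzer.parse_ollama_response(raw).unwrap();
        assert_eq!(insights.len(), 1);
        assert_eq!(insights[0].category, "error_analysis");
    }

    #[test]
    fn falls_back_to_generic_insight_on_unparseable_output() {
        let analyzer = LocalAiLogAnalyzer::new(None, None);
        let insights = analyzer.parse_ollama_response("no json here").unwrap();
        assert_eq!(insights.len(), 1);
        assert_eq!(insights[0].category, "ai_analysis");
    }
}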