using System.Text.Json;
using FluentResults;
using Microsoft.EntityFrameworkCore;
using Microsoft.Extensions.AI;
using Microsoft.Extensions.Options;
using NexusReader.Application.Abstractions.Services;
using NexusReader.Application.DTOs.AI;
using NexusReader.Domain.Entities;
using NexusReader.Infrastructure.Configuration;
using NexusReader.Infrastructure.Helpers;
using NexusReader.Infrastructure.Persistence;
using Polly;
using Polly.Registry;

namespace NexusReader.Infrastructure.Services;

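/// <summary>
/// Extracts structured knowledge from raw text via an AI chat client, caching results
/// in the database keyed by normalized-content hash and prompt version.
/// </summary>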
public class KnowledgeService : IKnowledgeService
{
    private readonly IChatClient _chatClient;
    private readonly AppDbContext _dbContext;
    private readonly ResiliencePipeline _retryPipeline;
    private readonly AiSettings _settings;

    // Bump when the extraction prompt changes so stale cache entries stop matching.
    private const string PromptVersion = "1.0";
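
    // AiSettings supplies Model, Temperature, MaxInputLength and MaxOutputTokens.
    // Illustrative configuration shape (section name and values assumed):
    //
    //   "AiSettings": {
    //     "Model": "gemini-2.0-flash",
    //     "Temperature": 0.2,
    //     "MaxInputLength": 20000,
    //     "MaxOutputTokens": 2048
    //   }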

    public KnowledgeService(
        IChatClient chatClient,
        AppDbContext dbContext,
        ResiliencePipelineProvider<string> pipelineProvider,
        IOptions<AiSettings> settings)
    {
        _chatClient = chatClient;
        _dbContext = dbContext;
        _retryPipeline = pipelineProvider.GetPipeline("ai-retry");
        _settings = settings.Value;
    }
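
    // The "ai-retry" pipeline is resolved by name from Polly's provider. A minimal
    // registration sketch, assuming the Polly v8 DI extensions (options illustrative):
    //
    //   services.AddResiliencePipeline("ai-retry", builder =>
    //       builder.AddRetry(new RetryStrategyOptions { MaxRetryAttempts = 3 }));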
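
    /// <summary>
    /// Extracts a <see cref="KnowledgePacket"/> from the given text, serving the result
    /// from the semantic cache when the normalized content hash and prompt version match.
    /// </summary>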
    public async Task<Result<KnowledgePacket>> GetKnowledgeAsync(string text, CancellationToken cancellationToken = default)
    {
        if (string.IsNullOrWhiteSpace(text))
        {
            return Result.Fail("Input text is empty.");
        }

        Console.WriteLine($"[KnowledgeService] Starting extraction for text: {text.Substring(0, Math.Min(text.Length, 50))}...");

        // Normalize text to ensure consistent hashing and reduce token noise
        var normalizedText = ContentHasher.Normalize(text);

        // Phase 4: Request Pre-processing (Token Saving)
        if (normalizedText.Length > _settings.MaxInputLength)
        {
            Console.WriteLine($"[KnowledgeService] Error: Input too long ({normalizedText.Length} > {_settings.MaxInputLength})");
            return Result.Fail($"Input text is too long ({normalizedText.Length} characters after normalization). Max allowed is {_settings.MaxInputLength}.");
        }

        // Rough token estimate (~4 characters per token, a common English-text heuristic)
        var estimatedTokens = normalizedText.Length / 4;
        Console.WriteLine($"[KnowledgeService] Processing request with ~{estimatedTokens} tokens.");

        var hash = ContentHasher.ComputeHash(normalizedText);

        // 1. Check Cache
        var cached = await _dbContext.SemanticKnowledgeCache
            .FirstOrDefaultAsync(c => c.ContentHash == hash && c.PromptVersion == PromptVersion, cancellationToken);

        if (cached != null)
        {
            Console.WriteLine($"[KnowledgeService] Cache hit for hash: {hash}");
            try
            {
                var packet = JsonSerializer.Deserialize<KnowledgePacket>(cached.JsonData);
                if (packet != null)
                {
                    return Result.Ok(packet);
                }
            }
            catch (JsonException ex)
            {
                Console.WriteLine($"[KnowledgeService] Cache deserialization error: {ex.Message}");
            }
        }
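
        // A missing or corrupt cache entry falls through to a fresh AI call;
        // the entry is then written back in step 3 below.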

        // 2. Call AI Client
        try
        {
            Console.WriteLine($"[KnowledgeService] Calling Gemini AI with Model: {_settings.Model}...");
            var options = new ChatOptions
            {
                // ResponseFormat = ChatResponseFormat.Json, // Disabled due to GeminiMappingException in current library version
                Temperature = (float)_settings.Temperature,
                MaxOutputTokens = _settings.MaxOutputTokens
            };

            var response = await _retryPipeline.ExecuteAsync(async ct =>
                await _chatClient.GetResponseAsync(new List<ChatMessage>
                {
                    new ChatMessage(ChatRole.System, PromptRegistry.KnowledgeExtractionSystemPrompt),
                    new ChatMessage(ChatRole.User, normalizedText)
                }, options, cancellationToken: ct), cancellationToken);
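
            // Note: the pipeline re-runs the whole callback on retry, so a transient
            // failure re-sends both the system prompt and the user text.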

            var jsonResponse = response.Text;
            if (string.IsNullOrWhiteSpace(jsonResponse))
            {
                Console.WriteLine("[KnowledgeService] AI returned empty response.");
                return Result.Fail("AI returned an empty response.");
            }

            Console.WriteLine($"[KnowledgeService] AI Response received ({jsonResponse.Length} chars).");

            // Cleanup potential markdown fences if Gemini still adds them despite the options.
            // Note: Replace strips the markers anywhere in the string, not just at the edges.
            jsonResponse = jsonResponse.Replace("```json", "").Replace("```", "").Trim();
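
            // Default System.Text.Json options are case-sensitive, so KnowledgePacket is
            // assumed to declare matching property names or [JsonPropertyName] attributes
            // for the model's output casing.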
            var knowledgePacket = JsonSerializer.Deserialize<KnowledgePacket>(jsonResponse);
            if (knowledgePacket == null)
            {
                Console.WriteLine("[KnowledgeService] Failed to deserialize JSON response.");
                return Result.Fail("Failed to deserialize AI response.");
            }

            // 3. Save to Cache
            Console.WriteLine("[KnowledgeService] Saving result to cache...");
            if (cached == null)
            {
                _dbContext.SemanticKnowledgeCache.Add(new SemanticKnowledgeCache
                {
                    ContentHash = hash,
                    JsonData = jsonResponse,
                    ModelId = _settings.Model,
                    PromptVersion = PromptVersion,
                    CreatedAt = DateTime.UtcNow
                });
            }
            else
            {
                // Refresh the corrupt entry found during the cache check,
                // including the model id in case the configured model changed.
                cached.JsonData = jsonResponse;
                cached.ModelId = _settings.Model;
                cached.CreatedAt = DateTime.UtcNow;
            }

            await _dbContext.SaveChangesAsync(cancellationToken);
            Console.WriteLine("[KnowledgeService] Extraction successful.");

            return Result.Ok(knowledgePacket);
        }
        catch (Exception ex)
        {
            Console.WriteLine($"[KnowledgeService] CRITICAL ERROR: {ex.GetType().Name}: {ex.Message}");
            if (ex.InnerException != null)
                Console.WriteLine($"[KnowledgeService] Inner Error: {ex.InnerException.Message}");

            return Result.Fail(new Error("Failed to extract knowledge from AI").CausedBy(ex));
        }
    }
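
    /// <summary>
    /// Removes every entry from SemanticKnowledgeCache, forcing fresh AI calls
    /// for all subsequent requests.
    /// </summary>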
    public async Task<Result> ClearCacheAsync(CancellationToken cancellationToken = default)
    {
        try
        {
            Console.WriteLine("[KnowledgeService] Clearing SemanticKnowledgeCache...");
            // RemoveRange materializes every cached row before deleting; on EF Core 7+,
            // ExecuteDeleteAsync would remove them in a single SQL statement.
            _dbContext.SemanticKnowledgeCache.RemoveRange(_dbContext.SemanticKnowledgeCache);
            await _dbContext.SaveChangesAsync(cancellationToken);
            Console.WriteLine("[KnowledgeService] Cache cleared successfully.");
            return Result.Ok();
        }
        catch (Exception ex)
        {
            Console.WriteLine($"[KnowledgeService] Error clearing cache: {ex.Message}");
            return Result.Fail($"Failed to clear cache: {ex.Message}");
        }
    }
}
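
// Usage sketch (illustrative; assumes the service is resolved through DI):
//
//   var result = await knowledgeService.GetKnowledgeAsync(selectedText, cancellationToken);
//   if (result.IsSuccess)
//   {
//       KnowledgePacket packet = result.Value;
//       // render the packet...
//   }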