Add Langfuse-based LLM observability (#86)

* Add Langfuse-based LLM observability

* Document Langfuse configuration

* Don't hardcode model in use
Author: Juan José Mata
Date: 2025-08-06 14:23:07 -07:00
Committed by: GitHub
Parent: 099425d240
Commit: f6dde1a098
7 changed files with 128 additions and 14 deletions
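
The Langfuse integration below is opt-in: it activates only when both API keys are present in the environment. A minimal configuration sketch, assuming the keys are provided as plain environment variables (only the two variable names below appear in this diff; anything else, such as a custom host, would come from the langfuse gem's own configuration):

# Required environment variables (values are placeholders):
#   LANGFUSE_PUBLIC_KEY=pk-lf-...
#   LANGFUSE_SECRET_KEY=sk-lf-...

# Quick check from a Rails console that tracing will be active:
ENV["LANGFUSE_PUBLIC_KEY"].present? && ENV["LANGFUSE_SECRET_KEY"].present?
# => true when both keys are set; otherwise log_langfuse_generation is a no-op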


@@ -14,27 +14,47 @@ class Provider::Openai < Provider
     MODELS.include?(model)
   end

-  def auto_categorize(transactions: [], user_categories: [])
+  def auto_categorize(transactions: [], user_categories: [], model: "gpt-4.1-mini")
     with_provider_response do
       raise Error, "Too many transactions to auto-categorize. Max is 25 per request." if transactions.size > 25

-      AutoCategorizer.new(
+      result = AutoCategorizer.new(
         client,
+        model: model,
         transactions: transactions,
         user_categories: user_categories
       ).auto_categorize
+
+      log_langfuse_generation(
+        name: "auto_categorize",
+        model: model,
+        input: { transactions: transactions, user_categories: user_categories },
+        output: result.map(&:to_h)
+      )
+
+      result
     end
   end

-  def auto_detect_merchants(transactions: [], user_merchants: [])
+  def auto_detect_merchants(transactions: [], user_merchants: [], model: "gpt-4.1-mini")
     with_provider_response do
       raise Error, "Too many transactions to auto-detect merchants. Max is 25 per request." if transactions.size > 25

-      AutoMerchantDetector.new(
+      result = AutoMerchantDetector.new(
         client,
+        model: model,
         transactions: transactions,
         user_merchants: user_merchants
       ).auto_detect_merchants
+
+      log_langfuse_generation(
+        name: "auto_detect_merchants",
+        model: model,
+        input: { transactions: transactions, user_merchants: user_merchants },
+        output: result.map(&:to_h)
+      )
+
+      result
     end
   end
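
With model now a keyword argument, callers can override the default per request. A hypothetical invocation, assuming a constructed provider instance and illustrative transaction/category shapes (the real app passes its own structures):

response = provider.auto_categorize(
  transactions: [ { id: "txn_1", name: "Blue Bottle Coffee", amount: 4.50 } ],
  user_categories: [ { id: "cat_1", name: "Coffee Shops" } ],
  model: "gpt-4.1"  # override the "gpt-4.1-mini" default for this one call
)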
@@ -61,9 +81,11 @@ class Provider::Openai < Provider
         nil
       end

+      input_payload = chat_config.build_input(prompt)
+
       raw_response = client.responses.create(parameters: {
         model: model,
-        input: chat_config.build_input(prompt),
+        input: input_payload,
         instructions: instructions,
         tools: chat_config.tools,
         previous_response_id: previous_response_id,
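
Hoisting chat_config.build_input(prompt) into input_payload ensures the exact payload sent to OpenAI is also what gets recorded in the Langfuse generation below, instead of being built twice.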
@@ -74,13 +96,50 @@ class Provider::Openai < Provider
       # for the "response chunk" in the stream and return it (it is already parsed)
       if stream_proxy.present?
         response_chunk = collected_chunks.find { |chunk| chunk.type == "response" }
-        response_chunk.data
+        response = response_chunk.data
+
+        log_langfuse_generation(
+          name: "chat_response",
+          model: model,
+          input: input_payload,
+          output: response.messages.map(&:output_text).join("\n")
+        )
+
+        response
       else
-        ChatParser.new(raw_response).parsed
+        parsed = ChatParser.new(raw_response).parsed
+
+        log_langfuse_generation(
+          name: "chat_response",
+          model: model,
+          input: input_payload,
+          output: parsed.messages.map(&:output_text).join("\n"),
+          usage: raw_response["usage"]
+        )
+
+        parsed
       end
     end
   end

   private
     attr_reader :client
+
+    def langfuse_client
+      return unless ENV["LANGFUSE_PUBLIC_KEY"].present? && ENV["LANGFUSE_SECRET_KEY"].present?
+
+      @langfuse_client ||= Langfuse.new
+    end
+
+    def log_langfuse_generation(name:, model:, input:, output:, usage: nil)
+      return unless langfuse_client
+
+      trace = langfuse_client.trace(name: "openai.#{name}", input: input)
+      trace.generation(
+        name: name,
+        model: model,
+        input: input,
+        output: output,
+        usage: usage
+      )
+      trace.update(output: output)
+    rescue => e
+      Rails.logger.warn("Langfuse logging failed: #{e.message}")
+    end
 end
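
Two guard rails keep tracing best-effort: langfuse_client returns nil unless both keys are set, which makes log_langfuse_generation a no-op, and the trailing rescue downgrades any Langfuse error to a Rails warning, so observability can never break a user-facing LLM call. Each provider call yields one trace named "openai.<name>" with a single generation nested under it; token usage is attached only on the non-streaming chat path, where the raw response hash is available.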


@@ -1,13 +1,14 @@
 class Provider::Openai::AutoCategorizer
-  def initialize(client, transactions: [], user_categories: [])
+  def initialize(client, model: "", transactions: [], user_categories: [])
     @client = client
+    @model = model
     @transactions = transactions
     @user_categories = user_categories
   end

   def auto_categorize
     response = client.responses.create(parameters: {
-      model: "gpt-4.1-mini",
+      model: model,
       input: [ { role: "developer", content: developer_message } ],
       text: {
         format: {
@@ -26,7 +27,7 @@ class Provider::Openai::AutoCategorizer
   end

   private
-    attr_reader :client, :transactions, :user_categories
+    attr_reader :client, :model, :transactions, :user_categories

     AutoCategorization = Provider::LlmConcept::AutoCategorization
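
Note the blank default for model: the categorizer now relies on its caller, Provider::Openai, to pass a real model name, and an empty string would presumably be rejected by the OpenAI API rather than falling back silently. A hypothetical direct use, with illustrative data shapes, therefore always supplies one:

Provider::Openai::AutoCategorizer.new(
  client,
  model: "gpt-4.1-mini",
  transactions: [ { id: "txn_1", name: "Uber Eats" } ],
  user_categories: [ { id: "cat_1", name: "Restaurants" } ]
).auto_categorize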


@@ -1,13 +1,14 @@
 class Provider::Openai::AutoMerchantDetector
-  def initialize(client, transactions:, user_merchants:)
+  def initialize(client, model: "", transactions:, user_merchants:)
     @client = client
+    @model = model
     @transactions = transactions
     @user_merchants = user_merchants
   end

   def auto_detect_merchants
     response = client.responses.create(parameters: {
-      model: "gpt-4.1-mini",
+      model: model,
       input: [ { role: "developer", content: developer_message } ],
       text: {
         format: {
@@ -26,7 +27,7 @@ class Provider::Openai::AutoMerchantDetector
   end

   private
-    attr_reader :client, :transactions, :user_merchants
+    attr_reader :client, :model, :transactions, :user_merchants

     AutoDetectedMerchant = Provider::LlmConcept::AutoDetectedMerchant