LLM cost estimation (#223)

* Password reset back button also after confirmation

Signed-off-by: Juan José Mata <juanjo.mata@gmail.com>

* Implement a filter for category (#215)

- Also implement an is empty/is null condition.

* Implement an LLM cost estimation page

Track costs across all the cost categories: auto categorization, auto merchant detection and chat.
Show warning with estimated cost when running a rule that contains AI.

* Update pricing

* Add google pricing

and fix inferred model everywhere.

* Update app/models/llm_usage.rb

Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Signed-off-by: soky srm <sokysrm@gmail.com>

* Fix: address review feedback

* Linter

* Address review

- Lowered log level
- Extracted the duplicated record_usage method into a shared concern

* Update app/controllers/settings/llm_usages_controller.rb

Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Signed-off-by: soky srm <sokysrm@gmail.com>

* Moved attr_reader out of private

---------

Signed-off-by: Juan José Mata <juanjo.mata@gmail.com>
Signed-off-by: soky srm <sokysrm@gmail.com>
Co-authored-by: Juan José Mata <juanjo.mata@gmail.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
This commit is contained in:
soky srm
2025-10-24 00:08:59 +02:00
committed by GitHub
parent 4999409082
commit bb364fab38
19 changed files with 651 additions and 21 deletions

View File

@@ -38,6 +38,20 @@ class RulesController < ApplicationController
end
def confirm
# Compute provider, model, and cost estimation for auto-categorize actions
if @rule.actions.any? { |a| a.action_type == "auto_categorize" }
# Use the same provider determination logic as Family::AutoCategorizer
llm_provider = Provider::Registry.get_provider(:openai)
if llm_provider
@selected_model = Provider::Openai.effective_model
@estimated_cost = LlmUsage.estimate_auto_categorize_cost(
transaction_count: @rule.affected_resource_count,
category_count: @rule.family.categories.count,
model: @selected_model
)
end
end
end
def edit

View File

@@ -0,0 +1,35 @@
class Settings::LlmUsagesController < ApplicationController
  layout "settings"

  # Shows LLM usage and estimated costs for the current family.
  #
  # Accepts optional ISO-8601 `params[:start_date]` / `params[:end_date]`;
  # invalid or missing values fall back to the last 30 days. An inverted
  # range (start after end) is also normalized to the last 30 days before
  # the end date, so user input can never produce an empty/negative window.
  def show
    @breadcrumbs = [
      [ "Home", root_path ],
      [ "LLM Usage", nil ]
    ]
    @family = Current.family

    # Date range from params or default to the last 30 days.
    # NOTE(review): Date.current would respect the Rails time zone; Date.today
    # uses the system zone — confirm which is intended here.
    @end_date = safe_parse_date(params[:end_date]) || Date.today
    @start_date = safe_parse_date(params[:start_date]) || (@end_date - 30.days)
    @start_date = @end_date - 30.days if @start_date > @end_date

    # Most recent usage rows within the range, capped to keep the page fast.
    @llm_usages = @family.llm_usages
      .for_date_range(@start_date.beginning_of_day, @end_date.end_of_day)
      .recent
      .limit(100)

    # Aggregate totals and per-operation/per-model cost breakdowns.
    @statistics = LlmUsage.statistics_for_family(@family, start_date: @start_date.beginning_of_day, end_date: @end_date.end_of_day)
  end

  private

    # Parses an ISO-8601 date string; returns nil for blank or invalid input
    # instead of raising, so callers can fall back to defaults.
    def safe_parse_date(s)
      Date.iso8601(s)
    rescue ArgumentError, TypeError
      nil
    end
end

View File

@@ -82,7 +82,8 @@ class Assistant::Responder
streamer: streamer,
previous_response_id: previous_response_id,
session_id: chat_session_id,
user_identifier: chat_user_identifier
user_identifier: chat_user_identifier,
family: message.chat&.user&.family
)
unless response.success?

View File

@@ -34,6 +34,8 @@ class Family < ApplicationRecord
has_many :budgets, dependent: :destroy
has_many :budget_categories, through: :budgets
has_many :llm_usages, dependent: :destroy
validates :locale, inclusion: { in: I18n.available_locales.map(&:to_s) }
validates :date_format, inclusion: { in: DATE_FORMATS.map(&:last) }

View File

@@ -18,7 +18,8 @@ class Family::AutoCategorizer
result = llm_provider.auto_categorize(
transactions: transactions_input,
user_categories: user_categories_input
user_categories: user_categories_input,
family: family
)
unless result.success?

View File

@@ -18,7 +18,8 @@ class Family::AutoMerchantDetector
result = llm_provider.auto_detect_merchants(
transactions: transactions_input,
user_merchants: user_merchants_input
user_merchants: user_merchants_input,
family: family
)
unless result.success?

157
app/models/llm_usage.rb Normal file
View File

@@ -0,0 +1,157 @@
# Tracks a single LLM API call (token counts plus estimated USD cost) for a
# family. Rows are aggregated by the settings LLM-usage dashboard via
# `statistics_for_family`.
class LlmUsage < ApplicationRecord
  belongs_to :family

  validates :provider, :model, :operation, presence: true
  validates :prompt_tokens, :completion_tokens, :total_tokens, presence: true, numericality: { greater_than_or_equal_to: 0 }
  validates :estimated_cost, numericality: { greater_than_or_equal_to: 0 }, allow_nil: true

  scope :for_family, ->(family) { where(family: family) }
  scope :for_operation, ->(operation) { where(operation: operation) }
  scope :recent, -> { order(created_at: :desc) }
  scope :for_date_range, ->(start_date, end_date) { where(created_at: start_date..end_date) }

  # Pricing in USD per 1M tokens (as of Oct 2025), keyed by provider then model.
  # Source: https://platform.openai.com/docs/pricing
  PRICING = {
    "openai" => {
      # GPT-4.1 and similar models
      "gpt-4.1" => { prompt: 2.00, completion: 8.00 },
      "gpt-4.1-mini" => { prompt: 0.40, completion: 1.60 },
      "gpt-4.1-nano" => { prompt: 0.40, completion: 1.60 },
      # 4o
      "gpt-4o" => { prompt: 2.50, completion: 10.00 },
      "gpt-4o-mini" => { prompt: 0.15, completion: 0.60 },
      # GPT-5 models (estimated pricing)
      "gpt-5" => { prompt: 1.25, completion: 10.00 },
      "gpt-5-mini" => { prompt: 0.25, completion: 2.00 },
      "gpt-5-nano" => { prompt: 0.05, completion: 0.40 },
      "gpt-5-pro" => { prompt: 15.00, completion: 120.00 },
      # o1 models
      "o1-mini" => { prompt: 1.10, completion: 4.40 },
      "o1" => { prompt: 15.00, completion: 60.00 },
      # o3 models (estimated pricing)
      "o3" => { prompt: 2.00, completion: 8.00 },
      "o3-mini" => { prompt: 1.10, completion: 4.40 },
      "o3-pro" => { prompt: 20.00, completion: 80.00 }
    },
    "google" => {
      "gemini-2.5-pro" => { prompt: 1.25, completion: 10.00 },
      "gemini-2.5-flash" => { prompt: 0.3, completion: 2.50 }
    }
  }.freeze

  # Calculates the estimated cost for a model and token usage.
  # The provider is automatically inferred from the model via the pricing map.
  #
  # Returns a Float (USD) rounded to 6 decimals, or nil if pricing is not
  # available for the model (e.g. custom/self-hosted providers).
  def self.calculate_cost(model:, prompt_tokens:, completion_tokens:)
    provider = infer_provider(model)
    pricing = find_pricing(provider, model)

    unless pricing
      Rails.logger.info("No pricing found for model: #{model} (inferred provider: #{provider})")
      return nil
    end

    # Pricing is per 1M tokens, so divide by 1_000_000
    prompt_cost = (prompt_tokens * pricing[:prompt]) / 1_000_000.0
    completion_cost = (completion_tokens * pricing[:completion]) / 1_000_000.0
    cost = (prompt_cost + completion_cost).round(6)

    Rails.logger.info("Calculated cost for #{provider}/#{model}: $#{cost} (#{prompt_tokens} prompt tokens, #{completion_tokens} completion tokens)")
    cost
  end

  # Finds pricing for a model, with prefix matching support.
  #
  # Exact matches win outright. Otherwise the LONGEST matching prefix is used,
  # so a dated model name like "gpt-4.1-mini-2025-04-14" resolves to the
  # "gpt-4.1-mini" entry instead of the shorter (and wrongly priced) "gpt-4.1".
  # Returns nil when no pricing is known.
  def self.find_pricing(provider, model)
    provider_pricing = PRICING[provider]
    return nil unless provider_pricing

    # Try exact match first
    return provider_pricing[model] if provider_pricing.key?(model)

    # Longest-prefix match (e.g. "gpt-4.1-2024-08-06" matches "gpt-4.1")
    match = provider_pricing
      .select { |model_prefix, _| model.start_with?(model_prefix) }
      .max_by { |model_prefix, _| model_prefix.length }
    match&.last
  end

  # Infers the provider from the model name by checking which provider has
  # pricing for it: exact match first, then the provider owning the longest
  # matching prefix. Defaults to "openai" when nothing matches (for
  # custom/self-hosted models, preserving backward compatibility).
  def self.infer_provider(model)
    return "openai" if model.blank?

    # Exact match across all providers first.
    PRICING.each do |provider_name, provider_pricing|
      return provider_name if provider_pricing.key?(model)
    end

    # Then the longest matching prefix across all providers.
    best = PRICING.flat_map do |provider_name, provider_pricing|
      provider_pricing.keys
        .select { |model_prefix| model.start_with?(model_prefix) }
        .map { |model_prefix| [ provider_name, model_prefix ] }
    end.max_by { |_, model_prefix| model_prefix.length }

    best ? best.first : "openai"
  end

  # Aggregates usage statistics for a family, optionally within a date range.
  #
  # Records with a nil estimated_cost (unknown pricing) are counted in
  # :total_requests but excluded from every cost figure and breakdown, so the
  # dollar totals are never silently understated by unpriced rows.
  def self.statistics_for_family(family, start_date: nil, end_date: nil)
    scope = for_family(family)
    scope = scope.for_date_range(start_date, end_date) if start_date && end_date

    scope_with_cost = scope.where.not(estimated_cost: nil)
    requests_with_cost = scope_with_cost.count
    total_cost = scope_with_cost.sum(:estimated_cost).to_f.round(2)
    avg_cost = requests_with_cost > 0 ? (total_cost / requests_with_cost).round(4) : 0.0

    {
      total_requests: scope.count,
      requests_with_cost: requests_with_cost,
      total_prompt_tokens: scope.sum(:prompt_tokens),
      total_completion_tokens: scope.sum(:completion_tokens),
      total_tokens: scope.sum(:total_tokens),
      total_cost: total_cost,
      avg_cost: avg_cost,
      by_operation: scope_with_cost.group(:operation).sum(:estimated_cost).transform_values { |v| v.to_f.round(2) },
      by_model: scope_with_cost.group(:model).sum(:estimated_cost).transform_values { |v| v.to_f.round(2) }
    }
  end

  # Formats the cost as a dollar string, or "N/A" when no estimate exists.
  def formatted_cost
    estimated_cost.nil? ? "N/A" : "$#{estimated_cost.round(4)}"
  end

  # Estimates the cost of auto-categorizing a batch of transactions.
  # Token heuristics: ~150 base prompt tokens (system message/instructions),
  # ~100 prompt tokens per transaction, ~50 per category, and ~50 completion
  # tokens per transaction (roughly one category name each).
  #
  # Returns 0.0 when there is nothing to categorize, or nil if pricing is not
  # available for the model (calculate_cost infers the provider).
  def self.estimate_auto_categorize_cost(transaction_count:, category_count:, model: "gpt-4.1")
    return 0.0 if transaction_count.zero?

    base_prompt_tokens = 150
    estimated_prompt_tokens = base_prompt_tokens + (transaction_count * 100) + (category_count * 50)
    estimated_completion_tokens = transaction_count * 50

    calculate_cost(
      model: model,
      prompt_tokens: estimated_prompt_tokens,
      completion_tokens: estimated_completion_tokens
    )
  end
end

View File

@@ -8,6 +8,13 @@ class Provider::Openai < Provider
DEFAULT_OPENAI_MODEL_PREFIXES = %w[gpt-4 gpt-5 o1 o3]
DEFAULT_MODEL = "gpt-4.1"
# Returns the effective model that would be used by the provider
# Uses the same logic as Provider::Registry and the initializer
def self.effective_model
configured_model = ENV.fetch("OPENAI_MODEL", Setting.openai_model)
configured_model.presence || DEFAULT_MODEL
end
def initialize(access_token, uri_base: nil, model: nil)
client_options = { access_token: access_token }
client_options[:uri_base] = uri_base if uri_base.present?
@@ -32,7 +39,7 @@ class Provider::Openai < Provider
@uri_base.present?
end
def auto_categorize(transactions: [], user_categories: [], model: "")
def auto_categorize(transactions: [], user_categories: [], model: "", family: nil)
with_provider_response do
raise Error, "Too many transactions to auto-categorize. Max is 25 per request." if transactions.size > 25
@@ -49,7 +56,8 @@ class Provider::Openai < Provider
transactions: transactions,
user_categories: user_categories,
custom_provider: custom_provider?,
langfuse_trace: trace
langfuse_trace: trace,
family: family
).auto_categorize
trace&.update(output: result.map(&:to_h))
@@ -58,7 +66,7 @@ class Provider::Openai < Provider
end
end
def auto_detect_merchants(transactions: [], user_merchants: [], model: "")
def auto_detect_merchants(transactions: [], user_merchants: [], model: "", family: nil)
with_provider_response do
raise Error, "Too many transactions to auto-detect merchants. Max is 25 per request." if transactions.size > 25
@@ -75,7 +83,8 @@ class Provider::Openai < Provider
transactions: transactions,
user_merchants: user_merchants,
custom_provider: custom_provider?,
langfuse_trace: trace
langfuse_trace: trace,
family: family
).auto_detect_merchants
trace&.update(output: result.map(&:to_h))
@@ -93,7 +102,8 @@ class Provider::Openai < Provider
streamer: nil,
previous_response_id: nil,
session_id: nil,
user_identifier: nil
user_identifier: nil,
family: nil
)
if custom_provider?
generic_chat_response(
@@ -104,7 +114,8 @@ class Provider::Openai < Provider
function_results: function_results,
streamer: streamer,
session_id: session_id,
user_identifier: user_identifier
user_identifier: user_identifier,
family: family
)
else
native_chat_response(
@@ -116,7 +127,8 @@ class Provider::Openai < Provider
streamer: streamer,
previous_response_id: previous_response_id,
session_id: session_id,
user_identifier: user_identifier
user_identifier: user_identifier,
family: family
)
end
end
@@ -133,7 +145,8 @@ class Provider::Openai < Provider
streamer: nil,
previous_response_id: nil,
session_id: nil,
user_identifier: nil
user_identifier: nil,
family: nil
)
with_provider_response do
chat_config = ChatConfig.new(
@@ -175,6 +188,7 @@ class Provider::Openai < Provider
response_chunk = collected_chunks.find { |chunk| chunk.type == "response" }
response = response_chunk.data
usage = response_chunk.usage
Rails.logger.debug("Stream response usage: #{usage.inspect}")
log_langfuse_generation(
name: "chat_response",
model: model,
@@ -184,9 +198,11 @@ class Provider::Openai < Provider
session_id: session_id,
user_identifier: user_identifier
)
record_llm_usage(family: family, model: model, operation: "chat", usage: usage)
response
else
parsed = ChatParser.new(raw_response).parsed
Rails.logger.debug("Non-stream raw_response['usage']: #{raw_response['usage'].inspect}")
log_langfuse_generation(
name: "chat_response",
model: model,
@@ -196,6 +212,7 @@ class Provider::Openai < Provider
session_id: session_id,
user_identifier: user_identifier
)
record_llm_usage(family: family, model: model, operation: "chat", usage: raw_response["usage"])
parsed
end
rescue => e
@@ -220,7 +237,8 @@ class Provider::Openai < Provider
function_results: [],
streamer: nil,
session_id: nil,
user_identifier: nil
user_identifier: nil,
family: nil
)
with_provider_response do
messages = build_generic_messages(
@@ -253,6 +271,8 @@ class Provider::Openai < Provider
user_identifier: user_identifier
)
record_llm_usage(family: family, model: model, operation: "chat", usage: raw_response["usage"])
# If a streamer was provided, manually call it with the parsed response
# to maintain the same contract as the streaming version
if streamer.present?
@@ -408,4 +428,46 @@ class Provider::Openai < Provider
rescue => e
Rails.logger.warn("Langfuse logging failed: #{e.message}")
end
def record_llm_usage(family:, model:, operation:, usage:)
return unless family && usage
Rails.logger.info("Recording LLM usage - Raw usage data: #{usage.inspect}")
# Handle both old and new OpenAI API response formats
# Old format: prompt_tokens, completion_tokens, total_tokens
# New format: input_tokens, output_tokens, total_tokens
prompt_tokens = usage["prompt_tokens"] || usage["input_tokens"] || 0
completion_tokens = usage["completion_tokens"] || usage["output_tokens"] || 0
total_tokens = usage["total_tokens"] || 0
Rails.logger.info("Extracted tokens - prompt: #{prompt_tokens}, completion: #{completion_tokens}, total: #{total_tokens}")
estimated_cost = LlmUsage.calculate_cost(
model: model,
prompt_tokens: prompt_tokens,
completion_tokens: completion_tokens
)
# Log when we can't estimate the cost (e.g., custom/self-hosted models)
if estimated_cost.nil?
Rails.logger.info("Recording LLM usage without cost estimate for unknown model: #{model} (custom provider: #{custom_provider?})")
end
inferred_provider = LlmUsage.infer_provider(model)
family.llm_usages.create!(
provider: inferred_provider,
model: model,
operation: operation,
prompt_tokens: prompt_tokens,
completion_tokens: completion_tokens,
total_tokens: total_tokens,
estimated_cost: estimated_cost,
metadata: {}
)
Rails.logger.info("LLM usage recorded successfully - Cost: #{estimated_cost.inspect}")
rescue => e
Rails.logger.error("Failed to record LLM usage: #{e.message}")
end
end

View File

@@ -1,11 +1,16 @@
class Provider::Openai::AutoCategorizer
def initialize(client, model: "", transactions: [], user_categories: [], custom_provider: false, langfuse_trace: nil)
include Provider::Openai::Concerns::UsageRecorder
attr_reader :client, :model, :transactions, :user_categories, :custom_provider, :langfuse_trace, :family
def initialize(client, model: "", transactions: [], user_categories: [], custom_provider: false, langfuse_trace: nil, family: nil)
@client = client
@model = model
@transactions = transactions
@user_categories = user_categories
@custom_provider = custom_provider
@langfuse_trace = langfuse_trace
@family = family
end
def auto_categorize
@@ -64,6 +69,16 @@ class Provider::Openai::AutoCategorizer
categorizations = extract_categorizations_native(response)
result = build_response(categorizations)
record_usage(
model.presence || Provider::Openai::DEFAULT_MODEL,
response.dig("usage"),
operation: "auto_categorize",
metadata: {
transaction_count: transactions.size,
category_count: user_categories.size
}
)
span&.end(output: result.map(&:to_h), usage: response.dig("usage"))
result
rescue => e
@@ -99,6 +114,16 @@ class Provider::Openai::AutoCategorizer
categorizations = extract_categorizations_generic(response)
result = build_response(categorizations)
record_usage(
model.presence || Provider::Openai::DEFAULT_MODEL,
response.dig("usage"),
operation: "auto_categorize",
metadata: {
transaction_count: transactions.size,
category_count: user_categories.size
}
)
span&.end(output: result.map(&:to_h), usage: response.dig("usage"))
result
rescue => e
@@ -106,8 +131,6 @@ class Provider::Openai::AutoCategorizer
raise
end
attr_reader :client, :model, :transactions, :user_categories, :custom_provider, :langfuse_trace
AutoCategorization = Provider::LlmConcept::AutoCategorization
def build_response(categorizations)

View File

@@ -1,11 +1,16 @@
class Provider::Openai::AutoMerchantDetector
def initialize(client, model: "", transactions:, user_merchants:, custom_provider: false, langfuse_trace: nil)
include Provider::Openai::Concerns::UsageRecorder
attr_reader :client, :model, :transactions, :user_merchants, :custom_provider, :langfuse_trace, :family
def initialize(client, model: "", transactions:, user_merchants:, custom_provider: false, langfuse_trace: nil, family: nil)
@client = client
@model = model
@transactions = transactions
@user_merchants = user_merchants
@custom_provider = custom_provider
@langfuse_trace = langfuse_trace
@family = family
end
def auto_detect_merchants
@@ -85,6 +90,16 @@ class Provider::Openai::AutoMerchantDetector
merchants = extract_merchants_native(response)
result = build_response(merchants)
record_usage(
model.presence || Provider::Openai::DEFAULT_MODEL,
response.dig("usage"),
operation: "auto_detect_merchants",
metadata: {
transaction_count: transactions.size,
merchant_count: user_merchants.size
}
)
span&.end(output: result.map(&:to_h), usage: response.dig("usage"))
result
rescue => e
@@ -120,6 +135,16 @@ class Provider::Openai::AutoMerchantDetector
merchants = extract_merchants_generic(response)
result = build_response(merchants)
record_usage(
model.presence || Provider::Openai::DEFAULT_MODEL,
response.dig("usage"),
operation: "auto_detect_merchants",
metadata: {
transaction_count: transactions.size,
merchant_count: user_merchants.size
}
)
span&.end(output: result.map(&:to_h), usage: response.dig("usage"))
result
rescue => e
@@ -127,8 +152,6 @@ class Provider::Openai::AutoMerchantDetector
raise
end
attr_reader :client, :model, :transactions, :user_merchants, :custom_provider, :langfuse_trace
AutoDetectedMerchant = Provider::LlmConcept::AutoDetectedMerchant
def build_response(categorizations)

View File

@@ -0,0 +1,47 @@
# Shared helper for persisting LLM token usage (and estimated cost) against a
# family. Including classes must expose `family` and `custom_provider` readers.
module Provider::Openai::Concerns::UsageRecorder
  extend ActiveSupport::Concern

  private

    # Records one LlmUsage row for `family`.
    #
    # Accepts both usage payload shapes OpenAI has shipped —
    # prompt_tokens/completion_tokens (old) and input_tokens/output_tokens
    # (new) — and infers the provider from the model name. When pricing is
    # unavailable (e.g. custom/self-hosted models) the row is stored with a
    # nil cost. Never raises: any failure is logged so usage tracking cannot
    # break the primary request.
    def record_usage(model_name, usage_data, operation:, metadata: {})
      return unless family && usage_data

      # Normalize across old/new OpenAI usage formats.
      input_count  = usage_data["prompt_tokens"] || usage_data["input_tokens"] || 0
      output_count = usage_data["completion_tokens"] || usage_data["output_tokens"] || 0
      token_total  = usage_data["total_tokens"] || 0

      cost = LlmUsage.calculate_cost(
        model: model_name,
        prompt_tokens: input_count,
        completion_tokens: output_count
      )

      if cost.nil?
        Rails.logger.info("Recording LLM usage without cost estimate for unknown model: #{model_name} (custom provider: #{custom_provider})")
      end

      family.llm_usages.create!(
        provider: LlmUsage.infer_provider(model_name),
        model: model_name,
        operation: operation,
        prompt_tokens: input_count,
        completion_tokens: output_count,
        total_tokens: token_total,
        estimated_cost: cost,
        metadata: metadata
      )

      Rails.logger.info("LLM usage recorded - Operation: #{operation}, Cost: #{cost.inspect}")
    rescue => e
      Rails.logger.error("Failed to record LLM usage: #{e.message}")
    end
end

View File

@@ -1,9 +1,31 @@
class Rule::ActionExecutor::AutoCategorize < Rule::ActionExecutor
def label
base_label = "Auto-categorize transactions with AI"
if rule.family.self_hoster?
"Auto-categorize transactions with AI ($$)"
# Use the same provider determination logic as Family::AutoCategorizer
llm_provider = Provider::Registry.get_provider(:openai)
if llm_provider
# Estimate cost for typical batch of 20 transactions
selected_model = Provider::Openai.effective_model
estimated_cost = LlmUsage.estimate_auto_categorize_cost(
transaction_count: 20,
category_count: rule.family.categories.count,
model: selected_model
)
suffix =
if estimated_cost.nil?
" (cost: N/A)"
else
" (~$#{sprintf('%.4f', estimated_cost)} per 20 transactions)"
end
"#{base_label}#{suffix}"
else
"#{base_label} (no LLM provider configured)"
end
else
"Auto-categorize transactions"
base_label
end
end

View File

@@ -15,6 +15,37 @@
that meet the specified rule criteria. Please confirm if you wish to proceed with this change.
</p>
<% if @rule.actions.any? { |a| a.action_type == "auto_categorize" } %>
<% affected_count = @rule.affected_resource_count %>
<div class="mb-4 p-3 bg-blue-50 border border-blue-200 rounded-lg">
<div class="flex items-start gap-2">
<%= icon "info", class: "w-4 h-4 text-blue-600 mt-0.5 flex-shrink-0" %>
<div class="text-xs">
<p class="font-medium text-blue-900 mb-1">AI Cost Estimation</p>
<% if @estimated_cost.present? %>
<p class="text-blue-700">
This will use AI to categorize <%= affected_count %> transaction<%= "s" if affected_count != 1 %>.
Estimated cost: <span class="font-semibold">~$<%= sprintf("%.4f", @estimated_cost) %></span>
</p>
<% else %>
<p class="text-blue-700">
This will use AI to categorize <%= affected_count %> transaction<%= "s" if affected_count != 1 %>.
<% if @selected_model.present? %>
<span class="font-semibold">Cost estimation unavailable for model "<%= @selected_model %>".</span>
<% else %>
<span class="font-semibold">Cost estimation unavailable (no LLM provider configured).</span>
<% end %>
You may incur costs, please check with the model provider for the most up-to-date prices.
</p>
<% end %>
<p class="text-blue-600 mt-1">
<%= link_to "View usage history", settings_llm_usage_path, class: "underline hover:text-blue-800" %>
</p>
</div>
</div>
</div>
<% end %>
<%= render DS::Button.new(
text: "Confirm changes",
href: apply_rule_path(@rule),

View File

@@ -25,6 +25,7 @@ nav_sections = [
header: t(".advanced_section_title"),
items: [
{ label: t(".ai_prompts_label"), path: settings_ai_prompts_path, icon: "bot" },
{ label: "LLM Usage", path: settings_llm_usage_path, icon: "activity" },
{ label: t(".api_keys_label"), path: settings_api_key_path, icon: "key" },
{ label: t(".self_hosting_label"), path: settings_hosting_path, icon: "database", if: self_hosted? },
{ label: t(".imports_label"), path: imports_path, icon: "download" },

View File

@@ -0,0 +1,165 @@
<%# LLM usage & cost dashboard (settings layout).
    Assigns set by Settings::LlmUsagesController#show:
      @start_date / @end_date - filter range (Date)
      @statistics - aggregate hash from LlmUsage.statistics_for_family
      @llm_usages - up to 100 most recent LlmUsage rows in the range
    Costs are estimates; rows without pricing render "N/A" via LlmUsage#formatted_cost. %>
<div class="bg-container rounded-xl shadow-border-xs p-4">
<div class="mb-6">
<h1 class="text-2xl font-semibold text-primary">LLM Usage & Costs</h1>
<p class="text-sm text-secondary mt-1">Track your AI usage and estimated costs</p>
</div>
<!-- Date Range Filter -->
<div class="mb-6">
<%= form_with url: settings_llm_usage_path, method: :get, class: "flex gap-4 items-end" do |f| %>
<div>
<%= f.label :start_date, "Start Date", class: "block text-sm font-medium text-primary mb-1" %>
<%= f.date_field :start_date, value: @start_date, class: "rounded-lg border border-primary px-3 py-2 text-sm" %>
</div>
<div>
<%= f.label :end_date, "End Date", class: "block text-sm font-medium text-primary mb-1" %>
<%= f.date_field :end_date, value: @end_date, class: "rounded-lg border border-primary px-3 py-2 text-sm" %>
</div>
<%= f.submit "Filter", class: "rounded-lg bg-gray-900 px-4 py-2 text-sm font-medium text-white hover:bg-gray-800" %>
<% end %>
</div>
<!-- Statistics Summary -->
<div class="grid grid-cols-1 md:grid-cols-4 gap-4 mb-6">
<div class="bg-container-inset rounded-lg p-4">
<div class="flex items-center gap-2 mb-2">
<%= icon "activity", class: "w-5 h-5 text-secondary" %>
<p class="text-xs font-medium text-secondary uppercase">Total Requests</p>
</div>
<p class="text-2xl font-semibold text-primary"><%= number_with_delimiter(@statistics[:total_requests]) %></p>
</div>
<div class="bg-container-inset rounded-lg p-4">
<div class="flex items-center gap-2 mb-2">
<%= icon "hash", class: "w-5 h-5 text-secondary" %>
<p class="text-xs font-medium text-secondary uppercase">Total Tokens</p>
</div>
<p class="text-2xl font-semibold text-primary"><%= number_with_delimiter(@statistics[:total_tokens]) %></p>
<p class="text-xs text-secondary mt-1">
<%= number_with_delimiter(@statistics[:total_prompt_tokens]) %> prompt /
<%= number_with_delimiter(@statistics[:total_completion_tokens]) %> completion
</p>
</div>
<div class="bg-container-inset rounded-lg p-4">
<div class="flex items-center gap-2 mb-2">
<%= icon "dollar-sign", class: "w-5 h-5 text-secondary" %>
<p class="text-xs font-medium text-secondary uppercase">Total Cost</p>
</div>
<p class="text-2xl font-semibold text-primary">$<%= sprintf("%.2f", @statistics[:total_cost]) %></p>
</div>
<div class="bg-container-inset rounded-lg p-4">
<div class="flex items-center gap-2 mb-2">
<%= icon "trending-up", class: "w-5 h-5 text-secondary" %>
<p class="text-xs font-medium text-secondary uppercase">Avg Cost/Request</p>
</div>
<p class="text-2xl font-semibold text-primary">
$<%= sprintf("%.4f", @statistics[:avg_cost]) %>
</p>
<%# Average is computed only over rows that have cost data; call that out
    when some rows were excluded so the number is not misread. %>
<% if @statistics[:requests_with_cost] < @statistics[:total_requests] %>
<p class="text-xs text-secondary mt-1">
Based on <%= number_with_delimiter(@statistics[:requests_with_cost]) %> of
<%= number_with_delimiter(@statistics[:total_requests]) %> requests with cost data
</p>
<% end %>
</div>
</div>
<!-- Cost by Operation -->
<% if @statistics[:by_operation].any? %>
<div class="mb-6">
<h2 class="text-lg font-semibold text-primary mb-3">Cost by Operation</h2>
<div class="bg-container-inset rounded-lg p-4">
<div class="space-y-2">
<% @statistics[:by_operation].each do |operation, cost| %>
<div class="flex justify-between items-center">
<span class="text-sm text-primary"><%= operation.humanize %></span>
<span class="text-sm font-medium text-primary">$<%= sprintf("%.4f", cost) %></span>
</div>
<% end %>
</div>
</div>
</div>
<% end %>
<!-- Cost by Model -->
<% if @statistics[:by_model].any? %>
<div class="mb-6">
<h2 class="text-lg font-semibold text-primary mb-3">Cost by Model</h2>
<div class="bg-container-inset rounded-lg p-4">
<div class="space-y-2">
<% @statistics[:by_model].each do |model, cost| %>
<div class="flex justify-between items-center">
<span class="text-sm text-primary font-mono"><%= model %></span>
<span class="text-sm font-medium text-primary">$<%= sprintf("%.4f", cost) %></span>
</div>
<% end %>
</div>
</div>
</div>
<% end %>
<!-- Recent Usage Table -->
<div>
<h2 class="text-lg font-semibold text-primary mb-3">Recent Usage</h2>
<div class="bg-container-inset rounded-lg overflow-hidden">
<% if @llm_usages.any? %>
<table class="w-full">
<thead class="bg-surface-default border-b border-primary">
<tr>
<th class="px-4 py-3 text-left text-xs font-medium text-secondary uppercase">Date</th>
<th class="px-4 py-3 text-left text-xs font-medium text-secondary uppercase">Operation</th>
<th class="px-4 py-3 text-left text-xs font-medium text-secondary uppercase">Model</th>
<th class="px-4 py-3 text-right text-xs font-medium text-secondary uppercase">Tokens</th>
<th class="px-4 py-3 text-right text-xs font-medium text-secondary uppercase">Cost</th>
</tr>
</thead>
<tbody class="divide-y divide-gray-100">
<% @llm_usages.each do |usage| %>
<tr>
<td class="px-4 py-3 text-sm text-primary whitespace-nowrap">
<%= usage.created_at.strftime("%b %d, %Y %I:%M %p") %>
</td>
<td class="px-4 py-3 text-sm text-primary">
<%= usage.operation.humanize %>
</td>
<td class="px-4 py-3 text-sm text-primary font-mono">
<%= usage.model %>
</td>
<td class="px-4 py-3 text-sm text-primary text-right whitespace-nowrap">
<%= number_with_delimiter(usage.total_tokens) %>
<%# Parenthesized split is prompt/completion tokens. %>
<span class="text-xs text-secondary">
(<%= number_with_delimiter(usage.prompt_tokens) %>/<%= number_with_delimiter(usage.completion_tokens) %>)
</span>
</td>
<td class="px-4 py-3 text-sm font-medium text-primary text-right whitespace-nowrap">
<%= usage.formatted_cost %>
</td>
</tr>
<% end %>
</tbody>
</table>
<% else %>
<div class="p-8 text-center">
<p class="text-secondary">No usage data found for the selected period</p>
</div>
<% end %>
</div>
</div>
<!-- Pricing Information -->
<div class="mt-6 p-4 bg-blue-50 border border-blue-200 rounded-lg">
<div class="flex items-start gap-2">
<%= icon "info", class: "w-5 h-5 text-blue-600 mt-0.5" %>
<div>
<p class="text-sm font-medium text-blue-900">About Cost Estimates</p>
<p class="text-xs text-blue-700 mt-1">
Costs are estimated based on OpenAI's pricing as of 2025. Actual costs may vary.
Pricing is per 1 million tokens and varies by model.
Custom or self-hosted models will show "N/A" and are not included in cost totals.
</p>
</div>
</div>
</div>
</div>