Mirror of https://github.com/we-promise/sure.git — synced 2026-04-07 14:31:25 +00:00
* Add comprehensive AI/LLM configuration documentation
* Fix Chat.start! to use default model when model is nil or empty
* Ensure all controllers use Chat.default_model for consistency
* Move AI doc inside `hosting/`
* Probably too much error handling

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: jjmata <187772+jjmata@users.noreply.github.com>
Co-authored-by: Juan José Mata <juanjo.mata@gmail.com>
102 lines · 2.8 KiB · Ruby
# Orchestrates a single chat conversation with an LLM provider: builds the
# assistant's reply message, streams generated text into it as it arrives,
# and relays any function (tool) calls the model requests.
class Assistant
  include Provided, Configurable, Broadcastable

  attr_reader :chat, :instructions

  class << self
    # Builds an Assistant for the given chat, using the instructions and
    # function-tool classes resolved by Configurable#config_for.
    #
    # @param chat [Chat] the conversation to respond within
    # @return [Assistant]
    def for_chat(chat)
      config = config_for(chat)
      new(chat, instructions: config[:instructions], functions: config[:functions])
    end
  end

  # @param chat [Chat] conversation this assistant responds within
  # @param instructions [String, nil] system prompt forwarded to the LLM
  # @param functions [Array<Class>] function-tool classes, instantiated
  #   lazily per chat user (see #function_tool_caller)
  def initialize(chat, instructions: nil, functions: [])
    @chat = chat
    @instructions = instructions
    @functions = functions
  end

  # Generates the assistant's response to `message`.
  #
  # Creates an empty AssistantMessage, wires streaming callbacks onto a
  # Responder, and kicks off the LLM call. Any error raised while
  # responding (including the missing-provider StandardError below) is
  # caught by the method-level rescue and recorded on the chat.
  #
  # @param message [UserMessage] the user message to respond to
  def respond_to(message)
    assistant_message = AssistantMessage.new(
      chat: chat,
      content: "",
      ai_model: message.ai_model
    )

    llm_provider = get_model_provider(message.ai_model)

    # Fail fast with an actionable message when no configured provider
    # supports the requested model.
    raise StandardError, build_no_provider_error_message(message.ai_model) unless llm_provider

    responder = Assistant::Responder.new(
      message: message,
      instructions: instructions,
      function_tool_caller: function_tool_caller,
      llm: llm_provider
    )

    latest_response_id = chat.latest_assistant_response_id

    responder.on(:output_text) do |text|
      if assistant_message.content.blank?
        # First streamed chunk: stop the "thinking" indicator and persist
        # the text together with the response pointer atomically.
        stop_thinking

        Chat.transaction do
          assistant_message.append_text!(text)
          chat.update_latest_response!(latest_response_id)
        end
      else
        assistant_message.append_text!(text)
      end
    end

    responder.on(:response) do |data|
      update_thinking("Analyzing your data...")

      if data[:function_tool_calls].present?
        # The model requested tool calls: record them and remember this
        # response id so the follow-up request can chain from it.
        assistant_message.tool_calls = data[:function_tool_calls]
        latest_response_id = data[:id]
      else
        chat.update_latest_response!(data[:id])
      end
    end

    responder.respond(previous_response_id: latest_response_id)
  rescue => e
    stop_thinking
    chat.add_error(e)
  end

  private
    attr_reader :functions

    # Memoized caller for the configured function tools, each instantiated
    # for the chat's user. Construction now happens inside the memoization
    # (the original rebuilt the instance list on every call, then discarded
    # it once @function_tool_caller was set).
    def function_tool_caller
      @function_tool_caller ||= FunctionToolCaller.new(
        functions.map { |fn| fn.new(chat.user) }
      )
    end

    # Builds a human-readable explanation for why `requested_model` could
    # not be served, listing each configured provider and the models it
    # supports (or prompting the user to configure one).
    #
    # @param requested_model [String]
    # @return [String]
    def build_no_provider_error_message(requested_model)
      available_providers = registry.providers

      if available_providers.empty?
        "No LLM provider configured that supports model '#{requested_model}'. " \
        "Please configure an LLM provider (e.g., OpenAI) in settings."
      else
        provider_details = available_providers.map do |provider|
          " - #{provider.provider_name}: #{provider.supported_models_description}"
        end.join("\n")

        "No LLM provider configured that supports model '#{requested_model}'.\n\n" \
        "Available providers:\n#{provider_details}\n\n" \
        "Please either:\n" \
        " 1. Use a supported model from the list above, or\n" \
        " 2. Configure a provider that supports '#{requested_model}' in settings."
      end
    end
end