mirror of
https://github.com/we-promise/sure.git
synced 2026-04-11 00:04:47 +00:00
* Abstract Assistant into module with registry (fixes #1016) - Add Assistant module with registry/factory (builtin, external) - Assistant.for_chat(chat) routes by family.assistant_type - Assistant.config_for(chat) delegates to Builtin for backward compat - Assistant.available_types returns registered types - Add Assistant::Base (Broadcastable, respond_to contract) - Move current behavior to Assistant::Builtin (Provided + Configurable) - Add Assistant::External stub for future OpenClaw/WebSocket - Migration: add families.assistant_type (default builtin) - Family: validate assistant_type inclusion - Tests: for_chat routing, available_types, External stub, blank chat guard * Fix RuboCop layout: indentation in Assistant module and tests * Move new test methods above private so Minitest discovers them * Clear thinking indicator in External#respond_to to avoid stuck UI * Rebase onto upstream main: fix schema to avoid spurious diffs - Rebase feature/abstract-assistant-1016 onto we-promise/main - Rename migration to 20260218120001 to avoid duplicate version with backfill_crypto_subtype - Regenerate schema from upstream + assistant_type only (keeps vector_store_id, realized_gain, etc.) - PR schema diff now shows only assistant_type addition and version bump --------- Co-authored-by: mkdev11 <jaysmth689+github@users.noreply.github.com>
96 lines
2.7 KiB
Ruby
# The built-in (first-party) assistant implementation. It streams an LLM
# response into an AssistantMessage on the chat, driving the chat's
# "thinking" indicator and response-id bookkeeping via callbacks from
# Assistant::Responder.
class Assistant::Builtin < Assistant::Base
  include Assistant::Provided
  include Assistant::Configurable

  # System instructions passed through to the responder.
  attr_reader :instructions

  class << self
    # Builds a Builtin assistant for the given chat, pulling instructions
    # and function tools from config_for (provided by Assistant::Configurable).
    def for_chat(chat)
      config = config_for(chat)
      new(chat, instructions: config[:instructions], functions: config[:functions])
    end
  end

  # @param chat         the chat this assistant responds within (stored by super)
  # @param instructions [String, nil] system prompt for the LLM
  # @param functions    [Array<Class>] function-tool classes; instantiated
  #                     per-user lazily in #function_tool_caller
  def initialize(chat, instructions: nil, functions: [])
    super(chat)
    @instructions = instructions
    @functions = functions
  end

  # Streams an assistant reply to `message`. Raises if no configured LLM
  # provider supports the requested model; any error raised during the
  # response is caught by the method-level rescue, which clears the
  # thinking indicator and records the error on the chat.
  def respond_to(message)
    # Placeholder message; persisted only once the first text chunk arrives
    # (append_text! inside the :output_text callback).
    assistant_message = AssistantMessage.new(
      chat: chat,
      content: "",
      ai_model: message.ai_model
    )

    llm_provider = get_model_provider(message.ai_model)
    unless llm_provider
      raise StandardError, build_no_provider_error_message(message.ai_model)
    end

    responder = Assistant::Responder.new(
      message: message,
      instructions: instructions,
      function_tool_caller: function_tool_caller,
      llm: llm_provider
    )

    # Captured here and mutated inside the :response callback when function
    # tool calls occur, so later events see the updated id.
    latest_response_id = chat.latest_assistant_response_id

    responder.on(:output_text) do |text|
      if assistant_message.content.blank?
        # First chunk: stop the thinking indicator, then persist the text
        # and the response id atomically so they can't diverge.
        stop_thinking
        Chat.transaction do
          assistant_message.append_text!(text)
          chat.update_latest_response!(latest_response_id)
        end
      else
        assistant_message.append_text!(text)
      end
    end

    responder.on(:response) do |data|
      update_thinking("Analyzing your data...")
      if data[:function_tool_calls].present?
        # Tool-call turn: record the calls and advance the local response id;
        # the chat's latest-response pointer is updated on the next text chunk.
        assistant_message.tool_calls = data[:function_tool_calls]
        latest_response_id = data[:id]
      else
        chat.update_latest_response!(data[:id])
      end
    end

    # NOTE(review): presumably lets the provider continue from the prior
    # response in a multi-turn tool-call exchange — confirm in Responder.
    responder.respond(previous_response_id: latest_response_id)
  rescue => e
    stop_thinking
    chat.add_error(e)
  end

  private
    attr_reader :functions

    # Memoized wrapper that instantiates each function-tool class for the
    # chat's user and hands the set to the responder.
    def function_tool_caller
      @function_tool_caller ||= Assistant::FunctionToolCaller.new(
        functions.map { |fn| fn.new(chat.user) }
      )
    end

    # Builds the user-facing error for an unsupported model: a short message
    # when no providers are configured at all, otherwise a list of available
    # providers and the models each supports.
    def build_no_provider_error_message(requested_model)
      available_providers = registry.providers
      if available_providers.empty?
        "No LLM provider configured that supports model '#{requested_model}'. " \
        "Please configure an LLM provider (e.g., OpenAI) in settings."
      else
        provider_details = available_providers.map do |provider|
          " - #{provider.provider_name}: #{provider.supported_models_description}"
        end.join("\n")

        "No LLM provider configured that supports model '#{requested_model}'.\n\n" \
        "Available providers:\n#{provider_details}\n\n" \
        "Please either:\n" \
        " 1. Use a supported model from the list above, or\n" \
        " 2. Configure a provider that supports '#{requested_model}' in settings."
      end
    end
end
|