Fix "Messages is invalid" error for Ollama/custom LLM providers and add comprehensive AI documentation (#225)

* Add comprehensive AI/LLM configuration documentation
* Fix Chat.start! to use default model when model is nil or empty
* Ensure all controllers use Chat.default_model for consistency
* Move AI doc inside `hosting/`
* Probably too much error handling

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: jjmata <187772+jjmata@users.noreply.github.com>
Co-authored-by: Juan José Mata <juanjo.mata@gmail.com>
This commit is contained in:
Copilot
2025-10-24 12:04:19 +02:00
committed by GitHub
parent 4f446307a7
commit a8f318c3f9
13 changed files with 833 additions and 11 deletions

View File

@@ -28,4 +28,36 @@ class ChatTest < ActiveSupport::TestCase
assert_equal 1, chat.messages.where(type: "UserMessage").count
end
end
# Regression coverage for Ollama/custom LLM providers: starting a chat
# with model: nil must fall back to the provider default rather than
# producing a message with a blank ai_model ("Messages is invalid").
test "creates with default model when model is nil" do
  message = "Test prompt"

  assert_difference "@user.chats.count", 1 do
    chat = @user.chats.start!(message, model: nil)

    assert_equal 1, chat.messages.count
    assert_equal Provider::Openai::DEFAULT_MODEL, chat.messages.first.ai_model
  end
end
# A blank-string model (e.g. an unset form field) should behave exactly
# like nil and resolve to the provider's default model.
test "creates with default model when model is empty string" do
  expected_model = Provider::Openai::DEFAULT_MODEL

  assert_difference "@user.chats.count", 1 do
    chat = @user.chats.start!("Test prompt", model: "")

    assert_equal 1, chat.messages.count
    assert_equal expected_model, chat.messages.first.ai_model
  end
end
# When OPENAI_MODEL is present in the environment it overrides the
# built-in default for chats started without an explicit model.
test "creates with configured model when OPENAI_MODEL env is set" do
  with_env_overrides OPENAI_MODEL: "custom-model" do
    chat = @user.chats.start!("Test prompt", model: "")

    assert_equal "custom-model", chat.messages.first.ai_model
  end
end
end