Fix "Messages is invalid" error for Ollama/custom LLM providers and add comprehensive AI documentation (#225)

* Add comprehensive AI/LLM configuration documentation
* Fix Chat.start! to use default model when model is nil or empty
* Ensure all controllers use Chat.default_model for consistency
* Move AI doc inside `hosting/`
* Probably too much error handling

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: jjmata <187772+jjmata@users.noreply.github.com>
Co-authored-by: Juan José Mata <juanjo.mata@gmail.com>
This commit is contained in:
Copilot
2025-10-24 12:04:19 +02:00
committed by GitHub
parent 4f446307a7
commit a8f318c3f9
13 changed files with 833 additions and 11 deletions

View File

@@ -257,4 +257,33 @@ class Provider::OpenaiTest < ActiveSupport::TestCase
assert_includes response_chunks.first.data.messages.first.output_text, "$10,000"
end
end
test "provider_name returns OpenAI for standard provider" do
  # A default-configured provider reports the canonical vendor name.
  reported_name = @subject.provider_name
  assert_equal "OpenAI", reported_name
end
test "provider_name returns custom info for custom provider" do
  # A provider pointed at a non-default endpoint identifies itself with
  # its base URI, so operators can tell configurations apart in logs/UI.
  provider = Provider::Openai.new(
    "test-token",
    uri_base: "https://custom-api.example.com/v1",
    model: "custom-model"
  )
  expected_name = "Custom OpenAI-compatible (https://custom-api.example.com/v1)"
  assert_equal expected_name, provider.provider_name
end
test "supported_models_description returns model prefixes for standard provider" do
  # Standard OpenAI advertises the model-name prefixes it accepts.
  description = @subject.supported_models_description
  assert_equal "models starting with: gpt-4, gpt-5, o1, o3", description
end
test "supported_models_description returns configured model for custom provider" do
  # A custom endpoint advertises exactly the single model it was
  # configured with, rather than the standard prefix list.
  provider = Provider::Openai.new(
    "test-token",
    uri_base: "https://custom-api.example.com/v1",
    model: "custom-model"
  )
  assert_equal "configured model: custom-model", provider.supported_models_description
end
end

View File

@@ -0,0 +1,62 @@
require "test_helper"
# Tests for Provider::Registry: LLM provider lookup, nil-filtering of
# unconfigured providers, and the fallback from ENV configuration to
# persisted Setting values (the fix this commit introduces).
class Provider::RegistryTest < ActiveSupport::TestCase
test "providers filters out nil values when provider is not configured" do
# Ensure OpenAI is not configured via either Settings or ENV.
Setting.stubs(:openai_access_token).returns(nil)
ENV.stubs(:fetch).with("OPENAI_ACCESS_TOKEN", nil).returns(nil)
registry = Provider::Registry.for_concept(:llm)
# Should return empty array instead of [nil]
assert_equal [], registry.providers
end
test "providers returns configured providers" do
# Mock a configured OpenAI provider so the registry has something to list.
mock_provider = mock("openai_provider")
Provider::Registry.stubs(:openai).returns(mock_provider)
registry = Provider::Registry.for_concept(:llm)
assert_equal [ mock_provider ], registry.providers
end
test "get_provider raises error when provider not found for concept" do
registry = Provider::Registry.for_concept(:llm)
# Unknown provider names raise rather than returning nil, with a
# message that names both the provider and the concept.
error = assert_raises(Provider::Registry::Error) do
registry.get_provider(:nonexistent)
end
assert_match(/Provider 'nonexistent' not found for concept: llm/, error.message)
end
test "get_provider returns nil when provider not configured" do
# Ensure OpenAI is not configured
# NOTE(review): this stubs ENV#[] while the first test stubs ENV.fetch —
# confirm which accessor the registry implementation actually uses, or
# stub both to make the tests robust to refactoring.
Setting.stubs(:openai_access_token).returns(nil)
ENV.stubs(:[]).with("OPENAI_ACCESS_TOKEN").returns(nil)
registry = Provider::Registry.for_concept(:llm)
# Should return nil when provider method exists but returns nil
assert_nil registry.get_provider(:openai)
end
test "openai provider falls back to Setting when ENV is empty string" do
# Simulate ENV being set to empty string (common in Docker/env files);
# an empty string must be treated as "not configured", not as a token.
ENV.stubs(:[]).with("OPENAI_ACCESS_TOKEN").returns("")
ENV.stubs(:[]).with("OPENAI_URI_BASE").returns("")
ENV.stubs(:[]).with("OPENAI_MODEL").returns("")
Setting.stubs(:openai_access_token).returns("test-token-from-setting")
Setting.stubs(:openai_uri_base).returns(nil)
Setting.stubs(:openai_model).returns(nil)
provider = Provider::Registry.get_provider(:openai)
# Should successfully create provider using Setting value
assert_not_nil provider
assert_instance_of Provider::Openai, provider
end
end