Compare commits

...

4 Commits

Author SHA1 Message Date
Claude
3e3466862a test(sqla): remove duplicate test function causing mypy no-redef error
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-07 16:07:48 -07:00
Superset Dev
7704d4c811 test: add unit tests verifying values_for_column passes catalog/schema to get_sqla_engine
Addresses review feedback requesting regression coverage to assert that
the dataset's catalog and schema are forwarded to get_sqla_engine,
including the None/empty-schema case.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-07 13:32:29 -07:00
Claude
82087d2fb5 test(sqla): add unit test verifying catalog/schema forwarded in values_for_column
Adds `test_values_for_column_passes_catalog_and_schema` to assert that
`values_for_column` passes the dataset's catalog and schema to
`get_sqla_engine`, covering both the explicit-schema and None/no-schema cases.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-07 13:10:29 -07:00
Superset Dev
b2c10f909b fix(sqla): pass catalog and schema to get_sqla_engine in values_for_column
The `values_for_column()` method, which powers the native filter value
dropdown, was calling `self.database.get_sqla_engine()` without passing
`catalog` or `schema`. This meant `adjust_engine_params()` was invoked
without schema context, so databases that encode the schema in the
connection URL (MySQL, Snowflake, Presto, etc.) would not apply the
correct schema for filter dropdown queries.

Every other query path in `ExploreMixin` already passes `self.catalog`
and `self.schema` — this change makes `values_for_column()` consistent.

Closes #32498

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-16 16:27:10 -07:00
2 changed files with 122 additions and 1 deletion
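
The fix commit (b2c10f909b) hinges on the fact that some engines encode the schema in the connection URL itself, so the engine params can only be adjusted correctly if get_sqla_engine() receives the dataset's schema. The snippet below is a minimal sketch of that mechanism, not Superset's actual adjust_engine_params(): the helper name adjust_url_for_schema and the MySQL-style URL are assumptions made purely for illustration.

# Minimal sketch (not Superset's implementation) of why the schema argument
# matters: dialects such as MySQL treat the database component of the
# connection URL as the schema, so the URL must be rewritten per query when
# the dataset targets a different schema.
from sqlalchemy.engine.url import make_url


def adjust_url_for_schema(uri: str, schema: str | None) -> str:
    """Return the URI with its database component swapped for `schema`, if given."""
    url = make_url(uri)
    if schema:
        # URL objects are immutable in SQLAlchemy 1.4+; .set() returns a copy.
        url = url.set(database=schema)
    return str(url)


# Without the schema argument the dataset's schema is silently ignored ...
assert adjust_url_for_schema("mysql://scott@db-host/default_db", None) == (
    "mysql://scott@db-host/default_db"
)
# ... with it, the filter-dropdown query runs against the dataset's schema.
assert adjust_url_for_schema("mysql://scott@db-host/default_db", "sales") == (
    "mysql://scott@db-host/sales"
)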

View File

@@ -2456,7 +2456,9 @@ class ExploreMixin: # pylint: disable=too-many-public-methods
         if rls_filters:
             qry = qry.where(and_(*rls_filters))
-        with self.database.get_sqla_engine() as engine:
+        with self.database.get_sqla_engine(
+            catalog=self.catalog, schema=self.schema
+        ) as engine:
             sql = str(qry.compile(engine, compile_kwargs={"literal_binds": True}))
             sql = self._apply_cte(sql, cte)

View File

@@ -98,6 +98,125 @@ def test_values_for_column(database: Database) -> None:
    assert table.values_for_column("a") == [1, None]


def test_values_for_column_passes_catalog_and_schema(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test that `values_for_column` forwards the dataset's catalog and schema
    to `database.get_sqla_engine()` so that engine params are adjusted with
    the correct schema context (e.g. for MySQL, Snowflake, Presto).
    """
    import pandas as pd

    from superset.connectors.sqla.models import SqlaTable, TableColumn
    from superset.models.core import Database

    SqlaTable.metadata.create_all(session.get_bind())

    engine = create_engine(
        "sqlite://",
        connect_args={"check_same_thread": False},
        poolclass=StaticPool,
    )
    database = Database(database_name="db", sqlalchemy_uri="sqlite://")

    connection = engine.raw_connection()
    connection.execute("CREATE TABLE t (a INTEGER, b TEXT)")
    connection.execute("INSERT INTO t VALUES (1, 'Alice')")
    connection.commit()

    # Track the catalog/schema values passed to get_sqla_engine
    captured_kwargs: dict[str, str | None] = {}

    @contextmanager
    def mock_get_sqla_engine(catalog=None, schema=None, **kwargs):
        captured_kwargs["catalog"] = catalog
        captured_kwargs["schema"] = schema
        yield engine

    mocker.patch.object(
        database,
        "get_sqla_engine",
        new=mock_get_sqla_engine,
    )

    table = SqlaTable(
        database=database,
        catalog="my_catalog",
        schema="my_schema",
        table_name="t",
        columns=[TableColumn(column_name="a")],
    )

    with patch(
        "pandas.read_sql_query",
        return_value=pd.DataFrame({"column_values": [1]}),
    ):
        table.values_for_column("a")

    assert captured_kwargs["catalog"] == "my_catalog"
    assert captured_kwargs["schema"] == "my_schema"


def test_values_for_column_passes_none_catalog_and_schema(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test that `values_for_column` forwards None catalog/schema when the
    dataset has no catalog or schema set.
    """
    import pandas as pd

    from superset.connectors.sqla.models import SqlaTable, TableColumn
    from superset.models.core import Database

    SqlaTable.metadata.create_all(session.get_bind())

    engine = create_engine(
        "sqlite://",
        connect_args={"check_same_thread": False},
        poolclass=StaticPool,
    )
    database = Database(database_name="db", sqlalchemy_uri="sqlite://")

    connection = engine.raw_connection()
    connection.execute("CREATE TABLE t (a INTEGER, b TEXT)")
    connection.execute("INSERT INTO t VALUES (1, 'Alice')")
    connection.commit()

    captured_kwargs: dict[str, str | None] = {}

    @contextmanager
    def mock_get_sqla_engine(catalog=None, schema=None, **kwargs):
        captured_kwargs["catalog"] = catalog
        captured_kwargs["schema"] = schema
        yield engine

    mocker.patch.object(
        database,
        "get_sqla_engine",
        new=mock_get_sqla_engine,
    )

    table = SqlaTable(
        database=database,
        schema=None,
        table_name="t",
        columns=[TableColumn(column_name="a")],
    )

    with patch(
        "pandas.read_sql_query",
        return_value=pd.DataFrame({"column_values": [1]}),
    ):
        table.values_for_column("a")

    assert captured_kwargs["catalog"] is None
    assert captured_kwargs["schema"] is None


def test_values_for_column_with_rls(database: Database) -> None:
    """
    Test the `values_for_column` method with RLS enabled.