mirror of
https://github.com/apache/superset.git
synced 2026-05-14 12:25:19 +00:00
Compare commits
12 Commits
sisyphus/i
...
fix/mcp-ex
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
16d136ef8d | ||
|
|
c78658d852 | ||
|
|
5b5dd01028 | ||
|
|
4aa4415d8f | ||
|
|
e667ceb6cf | ||
|
|
9aaa12c7d4 | ||
|
|
adfbbf1433 | ||
|
|
d7663a9a1c | ||
|
|
7290d3c452 | ||
|
|
d7beffcec1 | ||
|
|
f018b67895 | ||
|
|
5e2c6d8c9e |
@@ -114,7 +114,7 @@ dependencies = [
|
||||
|
||||
[project.optional-dependencies]
|
||||
|
||||
athena = ["pyathena[pandas]>=2, <3"]
|
||||
athena = ["pyathena[pandas]>=2, <4"]
|
||||
aurora-data-api = ["preset-sqlalchemy-aurora-data-api>=0.2.8,<0.3"]
|
||||
bigquery = [
|
||||
"pandas-gbq>=0.19.1",
|
||||
@@ -135,7 +135,7 @@ databricks = [
|
||||
"databricks-sqlalchemy==1.0.5",
|
||||
]
|
||||
db2 = ["ibm-db-sa>0.3.8, <=0.4.0"]
|
||||
denodo = ["denodo-sqlalchemy~=1.0.6"]
|
||||
denodo = ["denodo-sqlalchemy>=1.0.6,<2.1.0"]
|
||||
dremio = ["sqlalchemy-dremio>=1.2.1, <4"]
|
||||
drill = ["sqlalchemy-drill>=1.1.4, <2"]
|
||||
druid = ["pydruid>=0.6.5,<0.7"]
|
||||
@@ -197,7 +197,7 @@ tdengine = [
|
||||
]
|
||||
teradata = ["teradatasql>=16.20.0.23"]
|
||||
thumbnails = [] # deprecated, will be removed in 7.0
|
||||
vertica = ["sqlalchemy-vertica-python>=0.5.9, < 0.6"]
|
||||
vertica = ["sqlalchemy-vertica-python>= 0.5.9, < 0.7"]
|
||||
netezza = ["nzalchemy>=11.0.2"]
|
||||
starrocks = ["starrocks>=1.0.0"]
|
||||
doris = ["pydoris>=1.0.0, <2.0.0"]
|
||||
|
||||
16
superset-frontend/package-lock.json
generated
16
superset-frontend/package-lock.json
generated
@@ -115,7 +115,7 @@
|
||||
"memoize-one": "^5.2.1",
|
||||
"mousetrap": "^1.6.5",
|
||||
"mustache": "^4.2.0",
|
||||
"nanoid": "^5.1.9",
|
||||
"nanoid": "^5.1.11",
|
||||
"ol": "^10.9.0",
|
||||
"pretty-ms": "^9.3.0",
|
||||
"query-string": "9.3.1",
|
||||
@@ -249,7 +249,7 @@
|
||||
"eslint-plugin-no-only-tests": "^3.4.0",
|
||||
"eslint-plugin-prettier": "^5.5.5",
|
||||
"eslint-plugin-react-prefer-function-component": "^5.0.0",
|
||||
"eslint-plugin-react-you-might-not-need-an-effect": "^0.9.3",
|
||||
"eslint-plugin-react-you-might-not-need-an-effect": "^0.10.0",
|
||||
"eslint-plugin-storybook": "^0.8.0",
|
||||
"eslint-plugin-testing-library": "^7.16.2",
|
||||
"eslint-plugin-theme-colors": "file:eslint-rules/eslint-plugin-theme-colors",
|
||||
@@ -22872,9 +22872,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/eslint-plugin-react-you-might-not-need-an-effect": {
|
||||
"version": "0.9.3",
|
||||
"resolved": "https://registry.npmjs.org/eslint-plugin-react-you-might-not-need-an-effect/-/eslint-plugin-react-you-might-not-need-an-effect-0.9.3.tgz",
|
||||
"integrity": "sha512-44cce7LndBnpDRWBTQ8p7ircIdl2rJBP5+V9Ik64E935UB47uA9ZMU1Uv160lAMhtvoPYqXBjQ+tojr5JF3mFQ==",
|
||||
"version": "0.10.0",
|
||||
"resolved": "https://registry.npmjs.org/eslint-plugin-react-you-might-not-need-an-effect/-/eslint-plugin-react-you-might-not-need-an-effect-0.10.0.tgz",
|
||||
"integrity": "sha512-a4pugbQc2zLiE2NZGuXdTjtMNvlP2984QFPDv71eskUYDzigLFYfBL4QjK+RnRtcboHoXRKOcQqEZKxiK6KegA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -36703,9 +36703,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/nanoid": {
|
||||
"version": "5.1.9",
|
||||
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.9.tgz",
|
||||
"integrity": "sha512-ZUvP7KeBLe3OZ1ypw6dI/TzYJuvHP77IM4Ry73waSQTLn8/g8rpdjfyVAh7t1/+FjBtG4lCP42MEbDxOsRpBMw==",
|
||||
"version": "5.1.11",
|
||||
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.11.tgz",
|
||||
"integrity": "sha512-v+KEsUv2ps74PaSKv0gHTxTCgMXOIfBEbaqa6w6ISIGC7ZsvHN4N9oJ8d4cmf0n5oTzQz2SLmThbQWhjd/8eKg==",
|
||||
"funding": [
|
||||
{
|
||||
"type": "github",
|
||||
|
||||
@@ -196,7 +196,7 @@
|
||||
"memoize-one": "^5.2.1",
|
||||
"mousetrap": "^1.6.5",
|
||||
"mustache": "^4.2.0",
|
||||
"nanoid": "^5.1.9",
|
||||
"nanoid": "^5.1.11",
|
||||
"ol": "^10.9.0",
|
||||
"pretty-ms": "^9.3.0",
|
||||
"query-string": "9.3.1",
|
||||
@@ -330,7 +330,7 @@
|
||||
"eslint-plugin-no-only-tests": "^3.4.0",
|
||||
"eslint-plugin-prettier": "^5.5.5",
|
||||
"eslint-plugin-react-prefer-function-component": "^5.0.0",
|
||||
"eslint-plugin-react-you-might-not-need-an-effect": "^0.9.3",
|
||||
"eslint-plugin-react-you-might-not-need-an-effect": "^0.10.0",
|
||||
"eslint-plugin-storybook": "^0.8.0",
|
||||
"eslint-plugin-testing-library": "^7.16.2",
|
||||
"eslint-plugin-theme-colors": "file:eslint-rules/eslint-plugin-theme-colors",
|
||||
|
||||
@@ -307,6 +307,20 @@ export class ThemeController {
|
||||
return this.currentMode;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the resolved theme mode as 'dark' or 'light'.
|
||||
* Takes into account SYSTEM mode and returns the actual resolved preference.
|
||||
*/
|
||||
public getCurrentModeResolved(): 'dark' | 'light' {
|
||||
const activeTheme = this.getThemeForMode(this.currentMode);
|
||||
if (activeTheme) {
|
||||
const normalizedTheme = this.normalizeTheme(activeTheme);
|
||||
return isThemeConfigDark(normalizedTheme) ? 'dark' : 'light';
|
||||
}
|
||||
|
||||
return this.currentMode === ThemeMode.DARK ? 'dark' : 'light';
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets new theme.
|
||||
* @param theme - The new theme to apply
|
||||
|
||||
@@ -53,11 +53,19 @@ export function SupersetThemeProvider({
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
const unsubscribe = themeController.onChange(theme => {
|
||||
// TODO: Once we migrate to react>=18 is should be possible
|
||||
// to replace the useState and useEffect with a singular
|
||||
// useSyncExternalStore, simplifying quite a bit
|
||||
const updateState = (theme: Theme) => {
|
||||
setCurrentTheme(theme);
|
||||
setCurrentThemeMode(themeController.getCurrentMode());
|
||||
});
|
||||
|
||||
document.documentElement.setAttribute(
|
||||
'data-theme-mode',
|
||||
themeController.getCurrentModeResolved(),
|
||||
);
|
||||
};
|
||||
const unsubscribe = themeController.onChange(updateState);
|
||||
updateState(themeController.getTheme());
|
||||
return unsubscribe;
|
||||
}, [themeController]);
|
||||
|
||||
|
||||
@@ -1798,3 +1798,38 @@ test('ThemeController invalid initialMode falls back to SYSTEM', () => {
|
||||
// falling through to the default SYSTEM mode
|
||||
expect(controller.getCurrentMode()).toBe(ThemeMode.SYSTEM);
|
||||
});
|
||||
|
||||
test('getCurrentModeResolved returns light for light theme', () => {
|
||||
mockGetBootstrapData.mockReturnValue(
|
||||
createMockBootstrapData({
|
||||
default: { token: { colorBgBase: '#ffffff' } },
|
||||
dark: {
|
||||
token: { colorBgBase: '#000000' },
|
||||
algorithm: ThemeAlgorithm.DARK,
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const controller = createController();
|
||||
expect(controller.getCurrentModeResolved()).toBe('light');
|
||||
controller.setThemeMode(ThemeMode.DARK);
|
||||
expect(controller.getCurrentModeResolved()).toBe('dark');
|
||||
});
|
||||
|
||||
test('getResolvedThemeMode returns dark when default theme is dark but mode is DEFAULT', () => {
|
||||
// Setup: default theme is dark (has dark algorithm)
|
||||
// This simulates single-theme deployments where THEME_DARK=None but default is dark
|
||||
mockGetBootstrapData.mockReturnValue(
|
||||
createMockBootstrapData({
|
||||
default: {
|
||||
token: { colorBgBase: '#000000' }, // dark background
|
||||
algorithm: antdThemeImport.darkAlgorithm,
|
||||
},
|
||||
dark: {}, // empty - no separate dark theme
|
||||
}),
|
||||
);
|
||||
|
||||
const controller = createController();
|
||||
expect(controller.getCurrentMode()).toBe(ThemeMode.DEFAULT);
|
||||
expect(controller.getCurrentModeResolved()).toBe('dark');
|
||||
});
|
||||
|
||||
@@ -75,6 +75,7 @@ describe('SupersetThemeProvider', () => {
|
||||
mockThemeController = {
|
||||
getTheme: jest.fn().mockReturnValue(mockTheme),
|
||||
getCurrentMode: jest.fn().mockReturnValue(ThemeMode.DEFAULT),
|
||||
getCurrentModeResolved: jest.fn().mockReturnValue('dark'),
|
||||
setTheme: jest.fn(),
|
||||
setThemeMode: jest.fn(),
|
||||
resetTheme: jest.fn(),
|
||||
@@ -276,4 +277,59 @@ describe('SupersetThemeProvider', () => {
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
document.documentElement.removeAttribute('data-theme-mode');
|
||||
});
|
||||
|
||||
test('should set data-theme-mode="light" on mount when resolved mode is light', () => {
|
||||
mockThemeController.getCurrentModeResolved.mockReturnValue('light');
|
||||
|
||||
render(
|
||||
<SupersetThemeProvider themeController={mockThemeController}>
|
||||
<div>Content</div>
|
||||
</SupersetThemeProvider>,
|
||||
);
|
||||
|
||||
expect(document.documentElement.getAttribute('data-theme-mode')).toBe(
|
||||
'light',
|
||||
);
|
||||
});
|
||||
|
||||
test('should set data-theme-mode="dark" on mount when resolved mode is dark', () => {
|
||||
mockThemeController.getCurrentModeResolved.mockReturnValue('dark');
|
||||
|
||||
render(
|
||||
<SupersetThemeProvider themeController={mockThemeController}>
|
||||
<div>Content</div>
|
||||
</SupersetThemeProvider>,
|
||||
);
|
||||
|
||||
expect(document.documentElement.getAttribute('data-theme-mode')).toBe(
|
||||
'dark',
|
||||
);
|
||||
});
|
||||
|
||||
test('should update data-theme-mode when theme changes', () => {
|
||||
mockThemeController.getCurrentModeResolved.mockReturnValue('light');
|
||||
|
||||
render(
|
||||
<SupersetThemeProvider themeController={mockThemeController}>
|
||||
<div>Content</div>
|
||||
</SupersetThemeProvider>,
|
||||
);
|
||||
|
||||
expect(document.documentElement.getAttribute('data-theme-mode')).toBe(
|
||||
'light',
|
||||
);
|
||||
|
||||
act(() => {
|
||||
mockThemeController.getCurrentModeResolved.mockReturnValue('dark');
|
||||
mockOnChangeCallback(mockDarkTheme);
|
||||
});
|
||||
|
||||
expect(document.documentElement.getAttribute('data-theme-mode')).toBe(
|
||||
'dark',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import logging
|
||||
from collections.abc import Sequence
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Any, Optional, Union
|
||||
from uuid import UUID
|
||||
@@ -196,7 +197,7 @@ class BaseReportState:
|
||||
db.session.commit() # pylint: disable=consider-using-transaction
|
||||
except StaleDataError as ex:
|
||||
# Report schedule was modified or deleted by another process
|
||||
db.session.rollback()
|
||||
db.session.rollback() # pylint: disable=consider-using-transaction
|
||||
logger.warning(
|
||||
"Report schedule (execution %s) was modified or deleted "
|
||||
"during execution. This can occur when a report is deleted "
|
||||
@@ -280,6 +281,7 @@ class BaseReportState:
|
||||
)
|
||||
urls = self._get_tabs_urls(
|
||||
anchor_list,
|
||||
dashboard_state=dashboard_state,
|
||||
native_filter_params=native_filter_params,
|
||||
user_friendly=user_friendly,
|
||||
)
|
||||
@@ -291,12 +293,9 @@ class BaseReportState:
|
||||
# overwriting — dashboard_state may already have urlParams
|
||||
# (e.g. standalone=true) that must be preserved.
|
||||
state: DashboardPermalinkState = {**dashboard_state}
|
||||
existing_params: list[tuple[str, str]] = state.get("urlParams") or []
|
||||
merged_params: list[list[str]] = [
|
||||
list(p) for p in existing_params if p[0] != "native_filters"
|
||||
]
|
||||
merged_params.append(["native_filters", native_filter_params or ""])
|
||||
state["urlParams"] = merged_params # type: ignore[typeddict-item]
|
||||
state["urlParams"] = self._merge_native_filters_into_url_params(
|
||||
state.get("urlParams"), native_filter_params
|
||||
)
|
||||
return [
|
||||
self._get_tab_url(
|
||||
state,
|
||||
@@ -310,12 +309,17 @@ class BaseReportState:
|
||||
if filter_warnings:
|
||||
self._filter_warnings.extend(filter_warnings)
|
||||
if native_filter_params and native_filter_params != "()":
|
||||
# Preserve any urlParams from extra.dashboard (e.g. standalone=true)
|
||||
# set via API even when ALERT_REPORT_TABS is off — same merge
|
||||
# semantics as the protected branch above.
|
||||
fallback_state = self._report_schedule.extra.get("dashboard") or {}
|
||||
return [
|
||||
self._get_tab_url(
|
||||
{
|
||||
"urlParams": [
|
||||
["native_filters", native_filter_params] # type: ignore
|
||||
],
|
||||
"urlParams": self._merge_native_filters_into_url_params(
|
||||
fallback_state.get("urlParams"),
|
||||
native_filter_params,
|
||||
)
|
||||
},
|
||||
user_friendly=user_friendly,
|
||||
)
|
||||
@@ -353,24 +357,51 @@ class BaseReportState:
|
||||
user_friendly=user_friendly,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _merge_native_filters_into_url_params(
|
||||
existing: Optional[Sequence[Sequence[str]]],
|
||||
native_filter_params: Optional[str],
|
||||
) -> list[Sequence[str]]:
|
||||
"""
|
||||
Merge the report's ``native_filters`` into a permalink's existing
|
||||
``urlParams``, deduping any prior ``native_filters`` entry so the
|
||||
report's value wins. All other params (e.g. ``standalone=true``)
|
||||
survive in their original order.
|
||||
"""
|
||||
merged: list[Sequence[str]] = [
|
||||
list(p) for p in (existing or []) if p[0] != "native_filters"
|
||||
]
|
||||
merged.append(["native_filters", native_filter_params or ""])
|
||||
return merged
|
||||
|
||||
def _get_tabs_urls(
|
||||
self,
|
||||
tab_anchors: list[str],
|
||||
dashboard_state: Optional[DashboardPermalinkState] = None,
|
||||
native_filter_params: Optional[str] = None,
|
||||
user_friendly: bool = False,
|
||||
) -> list[str]:
|
||||
"""
|
||||
Get multple tabs urls
|
||||
Get multiple tabs urls.
|
||||
|
||||
Each per-tab permalink merges the report's ``native_filters`` into
|
||||
the original ``dashboard_state.urlParams`` (deduping any prior
|
||||
``native_filters`` entry), so params like ``standalone=true`` are
|
||||
preserved — matching the precedence rules of the single-tab branch
|
||||
in :meth:`get_dashboard_urls`.
|
||||
"""
|
||||
base_state: DashboardPermalinkState = dashboard_state or {}
|
||||
merged_params = self._merge_native_filters_into_url_params(
|
||||
base_state.get("urlParams"), native_filter_params
|
||||
)
|
||||
|
||||
return [
|
||||
self._get_tab_url(
|
||||
{
|
||||
"anchor": tab_anchor,
|
||||
"dataMask": None,
|
||||
"activeTabs": None,
|
||||
"urlParams": [
|
||||
["native_filters", native_filter_params] # type: ignore
|
||||
],
|
||||
"urlParams": merged_params,
|
||||
},
|
||||
user_friendly=user_friendly,
|
||||
)
|
||||
|
||||
@@ -360,11 +360,18 @@ class QueryObject: # pylint: disable=too-many-instance-attributes
|
||||
engine = database.db_engine_spec.engine
|
||||
|
||||
if needs_transpilation:
|
||||
clause = transpile_to_dialect(clause, engine)
|
||||
# source_engine=engine ensures idempotency: this
|
||||
# method can run more than once (validate() is called
|
||||
# from both raise_for_access and get_df_payload), so
|
||||
# the second pass must be able to re-parse the
|
||||
# dialect-specific output (e.g. BigQuery backticks)
|
||||
# produced by the first pass.
|
||||
clause = transpile_to_dialect(
|
||||
clause, engine, source_engine=engine, identify=True
|
||||
)
|
||||
|
||||
sanitized_clause = sanitize_clause(clause, engine)
|
||||
if sanitized_clause != clause:
|
||||
self.extras[param] = sanitized_clause
|
||||
self.extras[param] = sanitized_clause
|
||||
except QueryClauseValidationException as ex:
|
||||
raise QueryObjectValidationError(ex.message) from ex
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from collections.abc import Sequence
|
||||
from typing import Any, Optional, TypedDict
|
||||
|
||||
|
||||
@@ -21,7 +22,10 @@ class DashboardPermalinkState(TypedDict, total=False):
|
||||
dataMask: Optional[dict[str, Any]]
|
||||
activeTabs: Optional[list[str]]
|
||||
anchor: Optional[str]
|
||||
urlParams: Optional[list[tuple[str, str]]]
|
||||
# urlParams items are stored/transmitted as JSON arrays, so they
|
||||
# arrive at runtime as ``list[str]``; ``Sequence[str]`` keeps the
|
||||
# annotation permissive of both list and tuple shapes.
|
||||
urlParams: Optional[list[Sequence[str]]]
|
||||
chartStates: Optional[dict[str, Any]]
|
||||
|
||||
|
||||
|
||||
@@ -199,8 +199,12 @@ async def get_chart_data( # noqa: C901
|
||||
|
||||
if not chart:
|
||||
await ctx.warning("Chart not found: identifier=%s" % (request.identifier,))
|
||||
safe_id = str(request.identifier)[:200]
|
||||
return ChartError(
|
||||
error=f"No chart found with identifier: {request.identifier}",
|
||||
error=(
|
||||
f"No chart found with identifier: {safe_id}."
|
||||
" Use list_charts to get valid chart IDs."
|
||||
),
|
||||
error_type="NotFound",
|
||||
)
|
||||
|
||||
|
||||
@@ -1192,8 +1192,22 @@ async def _get_chart_preview_internal( # noqa: C901
|
||||
|
||||
if not chart:
|
||||
await ctx.warning("Chart not found: identifier=%s" % (request.identifier,))
|
||||
safe_id = str(request.identifier)[:200]
|
||||
is_form_data_key = (
|
||||
isinstance(request.identifier, str)
|
||||
and len(request.identifier) > 8
|
||||
and not request.identifier.isdigit()
|
||||
)
|
||||
if is_form_data_key:
|
||||
recovery = (
|
||||
"If using a form_data_key, it may have expired — "
|
||||
"use generate_explore_link to get a fresh key, "
|
||||
"or use list_charts to find a saved chart by ID."
|
||||
)
|
||||
else:
|
||||
recovery = "Use list_charts to get valid chart IDs."
|
||||
return ChartError(
|
||||
error=f"No chart found with identifier: {request.identifier}",
|
||||
error=f"No chart found with identifier: {safe_id}. {recovery}",
|
||||
error_type="NotFound",
|
||||
)
|
||||
|
||||
|
||||
@@ -337,17 +337,18 @@ async def update_chart( # noqa: C901
|
||||
chart = find_chart_by_identifier(request.identifier)
|
||||
|
||||
if not chart:
|
||||
safe_id = str(request.identifier)[:200]
|
||||
not_found_msg = (
|
||||
f"No chart found with identifier: {safe_id}."
|
||||
" Use list_charts to get valid chart IDs."
|
||||
)
|
||||
return GenerateChartResponse.model_validate(
|
||||
{
|
||||
"chart": None,
|
||||
"error": {
|
||||
"error_type": "NotFound",
|
||||
"message": (
|
||||
f"No chart found with identifier: {request.identifier}"
|
||||
),
|
||||
"details": (
|
||||
f"No chart found with identifier: {request.identifier}"
|
||||
),
|
||||
"message": not_found_msg,
|
||||
"details": not_found_msg,
|
||||
},
|
||||
"success": False,
|
||||
"schema_version": "2.0",
|
||||
|
||||
@@ -334,7 +334,10 @@ def _find_and_authorize_dashboard(
|
||||
dashboard=None,
|
||||
dashboard_url=None,
|
||||
position=None,
|
||||
error=f"Dashboard with ID {dashboard_id} not found",
|
||||
error=(
|
||||
f"Dashboard with ID {dashboard_id} not found."
|
||||
" Use list_dashboards to get valid dashboard IDs."
|
||||
),
|
||||
)
|
||||
|
||||
try:
|
||||
@@ -392,7 +395,10 @@ def add_chart_to_existing_dashboard(
|
||||
dashboard=None,
|
||||
dashboard_url=None,
|
||||
position=None,
|
||||
error=f"Chart with ID {request.chart_id} not found",
|
||||
error=(
|
||||
f"Chart with ID {request.chart_id} not found."
|
||||
" Use list_charts to get valid chart IDs."
|
||||
),
|
||||
)
|
||||
|
||||
# Validate dataset access for the chart.
|
||||
|
||||
@@ -230,7 +230,10 @@ def generate_dashboard( # noqa: C901
|
||||
return GenerateDashboardResponse(
|
||||
dashboard=None,
|
||||
dashboard_url=None,
|
||||
error=f"Charts not found: {list(missing_chart_ids)}",
|
||||
error=(
|
||||
f"Charts not found: {list(missing_chart_ids)}."
|
||||
" Use list_charts to get valid chart IDs."
|
||||
),
|
||||
)
|
||||
|
||||
# Validate dataset access for each chart.
|
||||
|
||||
@@ -183,7 +183,10 @@ async def query_dataset( # noqa: C901
|
||||
if dataset is None:
|
||||
await ctx.error("Dataset not found: identifier=%s" % (request.dataset_id,))
|
||||
return DatasetError.create(
|
||||
error=f"No dataset found with identifier: {request.dataset_id}",
|
||||
error=(
|
||||
f"No dataset found with identifier: {request.dataset_id}."
|
||||
" Use list_datasets to get valid dataset IDs."
|
||||
),
|
||||
error_type="NotFound",
|
||||
)
|
||||
|
||||
|
||||
@@ -100,7 +100,10 @@ async def execute_sql(request: ExecuteSqlRequest, ctx: Context) -> ExecuteSqlRes
|
||||
)
|
||||
return ExecuteSqlResponse(
|
||||
success=False,
|
||||
error=f"Database with ID {request.database_id} not found",
|
||||
error=(
|
||||
f"Database with ID {request.database_id} not found."
|
||||
" Use list_databases to get valid database IDs."
|
||||
),
|
||||
error_type=SupersetErrorType.DATABASE_NOT_FOUND_ERROR.value,
|
||||
)
|
||||
|
||||
|
||||
@@ -103,7 +103,8 @@ def open_sql_lab_with_context(
|
||||
database = DatabaseDAO.find_by_id(request.database_connection_id)
|
||||
if not database:
|
||||
error_message = (
|
||||
f"Database with ID {request.database_connection_id} not found"
|
||||
f"Database with ID {request.database_connection_id} not found."
|
||||
" Use list_databases to get valid database IDs."
|
||||
)
|
||||
return _sanitize_sql_lab_response_for_llm_context(
|
||||
SqlLabResponse(
|
||||
|
||||
@@ -3084,6 +3084,14 @@ class ExploreMixin: # pylint: disable=too-many-public-methods
|
||||
sqla_col = self.convert_tbl_column_to_sqla_col(
|
||||
tbl_column=col_obj, template_processor=template_processor
|
||||
)
|
||||
# Parenthesize expression-based columns to prevent operator
|
||||
# precedence issues (e.g. OR in a calculated column breaking
|
||||
# surrounding AND filters). Same pattern as extras.where
|
||||
# wrapping added in PR #38183.
|
||||
if sqla_col is not None and (
|
||||
(col_obj and col_obj.expression) or is_adhoc_column(flt_col)
|
||||
):
|
||||
sqla_col = Grouping(sqla_col)
|
||||
col_type = col_obj.type if col_obj else None
|
||||
col_spec = db_engine_spec.get_column_spec(native_type=col_type)
|
||||
is_list_target = op in (
|
||||
|
||||
@@ -1568,6 +1568,7 @@ def transpile_to_dialect(
|
||||
sql: str,
|
||||
target_engine: str,
|
||||
source_engine: str | None = None,
|
||||
identify: bool = False,
|
||||
) -> str:
|
||||
"""
|
||||
Transpile SQL from one database dialect to another using SQLGlot.
|
||||
@@ -1576,6 +1577,7 @@ def transpile_to_dialect(
|
||||
sql: The SQL query to transpile
|
||||
target_engine: The target database engine (e.g., "mysql", "postgresql")
|
||||
source_engine: The source database engine. If None, uses generic SQL dialect.
|
||||
identify: If True, quote all identifiers per the target dialect.
|
||||
|
||||
Returns:
|
||||
The transpiled SQL string
|
||||
@@ -1598,6 +1600,7 @@ def transpile_to_dialect(
|
||||
copy=True,
|
||||
comments=False,
|
||||
pretty=False,
|
||||
identify=identify,
|
||||
)
|
||||
except ParseError as ex:
|
||||
raise QueryClauseValidationException(f"Cannot parse SQL clause: {sql}") from ex
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -624,6 +624,65 @@ def test_get_tab_urls(
|
||||
]
|
||||
|
||||
|
||||
@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
|
||||
@with_feature_flags(ALERT_REPORT_TABS=True)
|
||||
def test_get_dashboard_urls_multitab_preserves_url_params(
|
||||
mock_permalink_cls,
|
||||
mocker: MockerFixture,
|
||||
app,
|
||||
) -> None:
|
||||
"""Multi-tab fan-out must preserve dashboard_state.urlParams (e.g. standalone)
|
||||
and replace any pre-existing native_filters entry with the report's value —
|
||||
matching the single-tab branch's merge semantics."""
|
||||
mock_report_schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
|
||||
mock_report_schedule.chart = False
|
||||
mock_report_schedule.chart_id = None
|
||||
mock_report_schedule.dashboard_id = 123
|
||||
mock_report_schedule.type = "report_type"
|
||||
mock_report_schedule.report_format = "report_format"
|
||||
mock_report_schedule.owners = [1, 2]
|
||||
mock_report_schedule.recipients = []
|
||||
native_filter_rison = "(NATIVE_FILTER-1:(filterType:filter_select))"
|
||||
# Use list-of-lists (not tuples) — extra_json deserializes urlParams from
|
||||
# JSON arrays. Includes a stale native_filters entry to exercise the
|
||||
# dedup-then-append step in the merge.
|
||||
mock_report_schedule.extra = {
|
||||
"dashboard": {
|
||||
"anchor": json.dumps(["TAB-1", "TAB-2"]),
|
||||
"urlParams": [
|
||||
["standalone", "true"],
|
||||
["native_filters", "(STALE_FILTER:(filterType:filter_select))"],
|
||||
["show_filters", "0"],
|
||||
],
|
||||
}
|
||||
}
|
||||
mock_report_schedule.get_native_filters_params.return_value = ( # type: ignore[attr-defined]
|
||||
native_filter_rison,
|
||||
[],
|
||||
)
|
||||
mock_permalink_cls.return_value.run.side_effect = ["key1", "key2"]
|
||||
|
||||
class_instance: BaseReportState = BaseReportState(
|
||||
mock_report_schedule, "January 1, 2021", "execution_id_example"
|
||||
)
|
||||
class_instance._report_schedule = mock_report_schedule
|
||||
|
||||
class_instance.get_dashboard_urls()
|
||||
|
||||
assert mock_permalink_cls.call_count == 2
|
||||
for idx, expected_anchor in enumerate(["TAB-1", "TAB-2"]):
|
||||
state = mock_permalink_cls.call_args_list[idx].kwargs["state"]
|
||||
# Stale native_filters is replaced (not duplicated); other params
|
||||
# survive in their original order; report's native_filters appended.
|
||||
assert state["urlParams"] == [
|
||||
["standalone", "true"],
|
||||
["show_filters", "0"],
|
||||
["native_filters", native_filter_rison],
|
||||
]
|
||||
# Each per-tab permalink targets exactly that tab.
|
||||
assert state["anchor"] == expected_anchor
|
||||
|
||||
|
||||
@patch(
|
||||
"superset.commands.dashboard.permalink.create.CreateDashboardPermalinkCommand.run"
|
||||
)
|
||||
@@ -702,6 +761,58 @@ def test_get_dashboard_urls_native_filters_without_tabs(
|
||||
assert "permalink_key" in result[0]
|
||||
|
||||
|
||||
@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
|
||||
@with_feature_flags(ALERT_REPORT_TABS=False)
|
||||
def test_get_dashboard_urls_flag_off_preserves_url_params(
|
||||
mock_permalink_cls,
|
||||
mocker: MockerFixture,
|
||||
app,
|
||||
) -> None:
|
||||
"""The post-``if``-block fall-through in ``get_dashboard_urls`` must
|
||||
honor any urlParams set in ``extra.dashboard`` (e.g. via API) — same
|
||||
merge semantics as the protected branch.
|
||||
|
||||
Reachability: only when ``dashboard_state`` is falsy OR
|
||||
``ALERT_REPORT_TABS=False``. The flag-on / no-anchor case lands in
|
||||
the single-tab merge at L290-306, not here.
|
||||
"""
|
||||
mock_report_schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
|
||||
mock_report_schedule.chart = False
|
||||
mock_report_schedule.chart_id = None
|
||||
mock_report_schedule.dashboard_id = 123
|
||||
native_filter_rison = "(NATIVE_FILTER-abc:!(val1))"
|
||||
mock_report_schedule.extra = {
|
||||
"dashboard": {
|
||||
"urlParams": [
|
||||
["standalone", "true"],
|
||||
["native_filters", "(STALE_FILTER:!(stale))"],
|
||||
["show_filters", "0"],
|
||||
],
|
||||
}
|
||||
}
|
||||
mock_report_schedule.get_native_filters_params.return_value = ( # type: ignore[attr-defined]
|
||||
native_filter_rison,
|
||||
[],
|
||||
)
|
||||
|
||||
class_instance: BaseReportState = BaseReportState(
|
||||
mock_report_schedule, "January 1, 2021", "execution_id_example"
|
||||
)
|
||||
class_instance._report_schedule = mock_report_schedule
|
||||
mock_permalink_cls.return_value.run.return_value = "permalink_key"
|
||||
|
||||
class_instance.get_dashboard_urls()
|
||||
|
||||
state = mock_permalink_cls.call_args_list[0].kwargs["state"]
|
||||
# Stale native_filters replaced; existing params survive in order;
|
||||
# report's native_filters appended.
|
||||
assert state["urlParams"] == [
|
||||
["standalone", "true"],
|
||||
["show_filters", "0"],
|
||||
["native_filters", native_filter_rison],
|
||||
]
|
||||
|
||||
|
||||
def create_report_schedule(
|
||||
mocker: MockerFixture,
|
||||
custom_width: int | None = None,
|
||||
|
||||
@@ -298,7 +298,8 @@ class TestOpenSqlLabWithContext:
|
||||
field_path=("title",),
|
||||
)
|
||||
assert response.error == sanitize_for_llm_context(
|
||||
"Database with ID 404 not found",
|
||||
"Database with ID 404 not found."
|
||||
" Use list_databases to get valid database IDs.",
|
||||
field_path=("error",),
|
||||
)
|
||||
finally:
|
||||
|
||||
@@ -1937,6 +1937,235 @@ def test_extras_having_is_parenthesized(
|
||||
)
|
||||
|
||||
|
||||
def test_calculated_column_filter_is_parenthesized(
|
||||
database: Database,
|
||||
) -> None:
|
||||
"""
|
||||
Test that calculated column expressions containing OR are wrapped in
|
||||
parentheses when used in WHERE filters.
|
||||
|
||||
Without parentheses, a calculated column expression like
|
||||
``status = 'active' OR status = 'pending'`` combined with other filters
|
||||
via AND would produce unexpected evaluation order due to SQL operator
|
||||
precedence (AND binds tighter than OR), potentially dropping time range
|
||||
and other filters. Same class of bug as fixed in PR #38183 for
|
||||
extras.where/having, but on the calculated column filter path.
|
||||
"""
|
||||
from superset.connectors.sqla.models import SqlaTable, TableColumn
|
||||
|
||||
table = SqlaTable(
|
||||
database=database,
|
||||
schema=None,
|
||||
table_name="t",
|
||||
columns=[
|
||||
TableColumn(column_name="a", type="INTEGER"),
|
||||
TableColumn(
|
||||
column_name="is_active",
|
||||
expression="status = 'active' OR status = 'pending'",
|
||||
type="BOOLEAN",
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
sqla_query = table.get_sqla_query(
|
||||
columns=["a"],
|
||||
filter=[
|
||||
{
|
||||
"col": "is_active",
|
||||
"op": "IS TRUE",
|
||||
"val": None,
|
||||
},
|
||||
],
|
||||
extras={},
|
||||
is_timeseries=False,
|
||||
metrics=[],
|
||||
)
|
||||
|
||||
with database.get_sqla_engine() as engine:
|
||||
sql = str(
|
||||
sqla_query.sqla_query.compile(
|
||||
dialect=engine.dialect,
|
||||
compile_kwargs={"literal_binds": True},
|
||||
)
|
||||
)
|
||||
|
||||
assert "(status = 'active' OR status = 'pending')" in sql, (
|
||||
f"Calculated column expression should be wrapped in parentheses. "
|
||||
f"Generated SQL: {sql}"
|
||||
)
|
||||
|
||||
|
||||
def test_calculated_column_nested_or_and_is_parenthesized(
|
||||
database: Database,
|
||||
) -> None:
|
||||
"""
|
||||
Test that calculated column expressions with nested OR/AND combinations
|
||||
are correctly parenthesized as a single unit in WHERE filters.
|
||||
"""
|
||||
from superset.connectors.sqla.models import SqlaTable, TableColumn
|
||||
|
||||
table = SqlaTable(
|
||||
database=database,
|
||||
schema=None,
|
||||
table_name="t",
|
||||
columns=[
|
||||
TableColumn(column_name="a", type="INTEGER"),
|
||||
TableColumn(
|
||||
column_name="is_target",
|
||||
expression=(
|
||||
"(status = 'active' AND region = 'US') "
|
||||
"OR (status = 'pending' AND region = 'EU')"
|
||||
),
|
||||
type="BOOLEAN",
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
sqla_query = table.get_sqla_query(
|
||||
columns=["a"],
|
||||
filter=[
|
||||
{
|
||||
"col": "is_target",
|
||||
"op": "IS TRUE",
|
||||
"val": None,
|
||||
},
|
||||
],
|
||||
extras={},
|
||||
is_timeseries=False,
|
||||
metrics=[],
|
||||
)
|
||||
|
||||
with database.get_sqla_engine() as engine:
|
||||
sql = str(
|
||||
sqla_query.sqla_query.compile(
|
||||
dialect=engine.dialect,
|
||||
compile_kwargs={"literal_binds": True},
|
||||
)
|
||||
)
|
||||
|
||||
assert (
|
||||
"((status = 'active' AND region = 'US') "
|
||||
"OR (status = 'pending' AND region = 'EU'))"
|
||||
) in sql, (
|
||||
f"Nested OR/AND expression should be wrapped in parentheses. "
|
||||
f"Generated SQL: {sql}"
|
||||
)
|
||||
|
||||
|
||||
def test_calculated_column_non_boolean_filter_is_parenthesized(
    database: Database,
) -> None:
    """
    A non-boolean calculated column (here, a string concatenation) must be
    wrapped in parentheses when referenced by an IN filter.
    """
    from superset.connectors.sqla.models import SqlaTable, TableColumn

    concat_expr = "first_name || ' ' || last_name"
    table = SqlaTable(
        database=database,
        schema=None,
        table_name="t",
        columns=[
            TableColumn(column_name="a", type="INTEGER"),
            TableColumn(
                column_name="full_name",
                expression=concat_expr,
                type="TEXT",
            ),
        ],
    )

    sqla_query = table.get_sqla_query(
        columns=["a"],
        filter=[
            {"col": "full_name", "op": "IN", "val": ["John Doe", "Jane Doe"]},
        ],
        extras={},
        is_timeseries=False,
        metrics=[],
    )

    with database.get_sqla_engine() as engine:
        sql = str(
            sqla_query.sqla_query.compile(
                dialect=engine.dialect,
                compile_kwargs={"literal_binds": True},
            )
        )

    assert f"({concat_expr})" in sql, (
        f"Non-boolean calculated column should be wrapped in parentheses. "
        f"Generated SQL: {sql}"
    )
|
||||
|
||||
|
||||
def test_multiple_calculated_columns_each_parenthesized(
    database: Database,
) -> None:
    """
    When two calculated columns are used as filters in the same query, each
    expression must be independently wrapped in parentheses.
    """
    from superset.connectors.sqla.models import SqlaTable, TableColumn

    active_expr = "status = 'active' OR status = 'pending'"
    premium_expr = "tier = 'gold' OR tier = 'platinum'"

    table = SqlaTable(
        database=database,
        schema=None,
        table_name="t",
        columns=[
            TableColumn(column_name="a", type="INTEGER"),
            TableColumn(
                column_name="is_active",
                expression=active_expr,
                type="BOOLEAN",
            ),
            TableColumn(
                column_name="is_premium",
                expression=premium_expr,
                type="BOOLEAN",
            ),
        ],
    )

    sqla_query = table.get_sqla_query(
        columns=["a"],
        filter=[
            {"col": "is_active", "op": "IS TRUE", "val": None},
            {"col": "is_premium", "op": "IS TRUE", "val": None},
        ],
        extras={},
        is_timeseries=False,
        metrics=[],
    )

    with database.get_sqla_engine() as engine:
        sql = str(
            sqla_query.sqla_query.compile(
                dialect=engine.dialect,
                compile_kwargs={"literal_binds": True},
            )
        )

    assert f"({active_expr})" in sql, (
        f"First calculated column should be parenthesized. Generated SQL: {sql}"
    )
    assert f"({premium_expr})" in sql, (
        f"Second calculated column should be parenthesized. Generated SQL: {sql}"
    )
|
||||
|
||||
|
||||
def _run_probe(
|
||||
database: Database,
|
||||
type_probe_needs_row: bool = False,
|
||||
|
||||
@@ -396,3 +396,127 @@ def test_transpile_unknown_source_engine_uses_generic() -> None:
|
||||
"SELECT * FROM orders", "postgresql", "unknown_engine"
|
||||
)
|
||||
assert result == "SELECT * FROM orders"
|
||||
|
||||
|
||||
# identify=True: identifier quoting per target dialect
@pytest.mark.parametrize(
    "sql,dialect,expected",
    [
        # PostgreSQL quotes identifiers with double quotes
        ("STATE ILIKE '%AL%'", "postgresql", "\"STATE\" ILIKE '%AL%'"),
        # MySQL backtick-quotes identifiers and has no ILIKE, so it is rewritten
        ("STATE ILIKE '%AL%'", "mysql", "LOWER(`STATE`) LIKE LOWER('%AL%')"),
        # BigQuery also backtick-quotes identifiers and rewrites ILIKE
        ("STATE ILIKE '%AL%'", "bigquery", "LOWER(`STATE`) LIKE LOWER('%AL%')"),
        # Snowflake keeps ILIKE and double-quotes identifiers
        ("STATE ILIKE '%AL%'", "snowflake", "\"STATE\" ILIKE '%AL%'"),
        # MSSQL quotes identifiers with square brackets
        ("STATE = 'CA'", "mssql", "[STATE] = 'CA'"),
        # Every identifier in a compound predicate gets quoted
        (
            "STATE = 'CA' AND AIRLINE = 'Delta'",
            "postgresql",
            "\"STATE\" = 'CA' AND \"AIRLINE\" = 'Delta'",
        ),
        # Lowercase identifiers are quoted as well
        ("name = 'test'", "postgresql", "\"name\" = 'test'"),
    ],
)
def test_identify_quotes_identifiers(sql: str, dialect: str, expected: str) -> None:
    """identify=True must quote identifiers according to the target dialect."""
    assert transpile_to_dialect(sql, dialect, identify=True) == expected
|
||||
|
||||
|
||||
def test_identify_unknown_engine_returns_unchanged() -> None:
    """An unrecognized engine leaves the clause untouched even with identify=True."""
    clause = "STATE = 'CA'"
    assert transpile_to_dialect(clause, "unknown_engine", identify=True) == clause
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "sql,engine,expected",
    [
        ("STATE ILIKE '%AL%'", "postgresql", "\"STATE\" ILIKE '%AL%'"),
        (
            "country ILIKE '%Italy%'",
            "bigquery",
            "LOWER(`country`) LIKE LOWER('%Italy%')",
        ),
    ],
)
def test_identify_with_source_engine(sql: str, engine: str, expected: str) -> None:
    """identify=True still quotes when source_engine matches the target engine."""
    transpiled = transpile_to_dialect(sql, engine, source_engine=engine, identify=True)
    assert transpiled == expected
|
||||
|
||||
|
||||
@pytest.mark.parametrize("engine", ["postgresql", "bigquery", "mysql", "snowflake"])
def test_identify_transpilation_is_idempotent(engine: str) -> None:
    """Transpiling a clause twice yields the same output as transpiling once.

    _sanitize_filters() may be invoked more than once via validate(), so the
    transformation has to be idempotent.
    """
    clause = "STATE ILIKE '%AL%'"
    once = transpile_to_dialect(clause, engine, source_engine=engine, identify=True)
    twice = transpile_to_dialect(once, engine, source_engine=engine, identify=True)
    assert once == twice
|
||||
|
||||
|
||||
def test_sanitize_filters_writes_back_transpiled_clause() -> None:
    """_sanitize_filters must always persist the transpiled WHERE clause.

    Regression test: a previous conditional `if sanitized_clause != clause`
    skipped the write-back when transpile_to_dialect had already modified
    the clause, leaving the original unquoted value in extras.
    """
    from unittest.mock import MagicMock

    from superset.common.query_object import QueryObject

    # Fake only the attribute chain validate() reads to pick the dialect.
    datasource = MagicMock()
    datasource.database.db_engine_spec.engine = "postgresql"

    query_obj = QueryObject(
        datasource=datasource,
        columns=["STATE"],
        metrics=[],
        extras={"where": "STATE = 'CA'", "transpile_to_dialect": True},
    )
    query_obj.validate()

    # After validation the quoted identifier must have been written back.
    assert '"STATE"' in query_obj.extras["where"]
|
||||
|
||||
Reference in New Issue
Block a user