Compare commits

..

4 Commits

Author SHA1 Message Date
Enzo Martellucci
a4dba4f0c2 Merge branch 'master' into enxdev/fix/alerts-reports-filter-time-range-multitab-timeout 2026-05-05 18:28:49 +02:00
Enzo Martellucci
012094aaf9 refactor(reports): extract native_filters urlParams merge into a helper 2026-05-05 18:28:12 +02:00
Enzo Martellucci
460300ccdf test(reports): clarify flag-off urlParams test scope
Address review feedback on the flag-off urlParams preservation test:

- Drop dead `force_screenshot = False` — the merge branch returns early
  so the attribute is never read. Mock auto-attributes are truthy by
  default; the matching multi-tab test doesn't set it either.
- Expand the docstring with a "Reachability:" note explaining that the
  fall-through is reached only when `dashboard_state` is falsy OR
  `ALERT_REPORT_TABS=False`; the flag-on / no-anchor case lands in the
  single-tab merge at L290-306 (already correct since before this PR),
  not in this branch.

No production change.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-05 10:48:41 +02:00
Enzo Martellucci
f9c665f8a5 fix(reports): preserve dashboard_state urlParams in report URL builders
`_get_tabs_urls` and the `ALERT_REPORT_TABS`-disabled fall-through in
`get_dashboard_urls` were both building permalinks with
`urlParams=[["native_filters", <rison>]]` only, silently discarding
everything else `extra.dashboard.urlParams` carried — notably
`standalone=true`, which the single-tab branch already preserves
(see `get_dashboard_urls` lines 290-306).

The list-anchor shape that triggers the multi-tab path is reachable
from the standard Reports UI: when a dashboard has at least 2 tabs,
the AlertReportModal injects an "All Tabs" option whose value is
`JSON.stringify(allTabsWithOrder)` (see `AlertReportModal.tsx`
~L1094-1099). Picking it makes the backend fan out per-tab screenshots,
each missing `standalone=true`, which causes Playwright to time out at
`Locator.wait_for(".standalone")` after the 10-minute hard timeout.
Recipients get a failure email instead of the screenshots.

Apply the same merge semantics in both spots: dedup any existing
`native_filters` entry from the original `urlParams` and append the
report's. Other per-tab fields (`anchor`, `dataMask`, `activeTabs`)
keep their previous shape — this fix is scoped to the `urlParams`
discard. The `ALERT_REPORT_TABS=False` fall-through previously didn't
honor `extra.dashboard.urlParams` at all (those params could only have
been API-set, since the modal is gated on the flag); now it does,
matching the on-flag path.

Drive-bys (same area, kept tight):
- `DashboardPermalinkState.urlParams` annotation: `list[tuple[str, str]]`
  -> `list[Sequence[str]]`. JSON-derived values arrive as
  `list[list[str]]` at runtime; `Sequence` is permissive of both shapes.
- Single-tab branch and the merged fall-through use the new annotation;
  obsolete `# type: ignore` comments removed.
- Silenced a pre-existing pylint `consider-using-transaction` warning
  on `db.session.rollback()` in `create_log` that the pre-commit hook
  was sporadically failing on; same disable comment as the surrounding
  `db.session.commit()`.

Tests:
- `test_get_dashboard_urls_multitab_preserves_url_params` seeds
  `urlParams` with `standalone`, `show_filters`, AND a stale
  `native_filters` entry, and asserts the merge replaces the stale
  entry, preserves the others in order, and targets each per-tab
  permalink at its anchor.
- `test_get_dashboard_urls_flag_off_preserves_url_params` does the
  same for the `ALERT_REPORT_TABS=False` fall-through.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-05 10:38:59 +02:00
35 changed files with 377 additions and 3085 deletions

View File

@@ -114,7 +114,7 @@ dependencies = [
[project.optional-dependencies]
athena = ["pyathena[pandas]>=2, <4"]
athena = ["pyathena[pandas]>=2, <3"]
aurora-data-api = ["preset-sqlalchemy-aurora-data-api>=0.2.8,<0.3"]
bigquery = [
"pandas-gbq>=0.19.1",
@@ -135,7 +135,7 @@ databricks = [
"databricks-sqlalchemy==1.0.5",
]
db2 = ["ibm-db-sa>0.3.8, <=0.4.0"]
denodo = ["denodo-sqlalchemy>=1.0.6,<2.1.0"]
denodo = ["denodo-sqlalchemy~=1.0.6"]
dremio = ["sqlalchemy-dremio>=1.2.1, <4"]
drill = ["sqlalchemy-drill>=1.1.4, <2"]
druid = ["pydruid>=0.6.5,<0.7"]
@@ -158,7 +158,7 @@ hive = [
"thrift>=0.14.1, <1.0.0",
"thrift_sasl>=0.4.3, < 1.0.0",
]
impala = ["impyla>0.16.2, <0.23"]
impala = ["impyla>0.16.2, <0.17"]
kusto = ["sqlalchemy-kusto>=3.0.0, <4"]
kylin = ["kylinpy>=2.8.1, <2.9"]
mssql = ["pymssql>=2.2.8, <3"]
@@ -171,7 +171,7 @@ ocient = [
"shapely",
"geojson",
]
oracle = ["cx-Oracle>8.0.0, <8.4"]
oracle = ["cx-Oracle>8.0.0, <8.1"]
parseable = ["sqlalchemy-parseable>=0.1.3,<0.2.0"]
pinot = ["pinotdb>=5.0.0, <6.0.0"]
playwright = ["playwright>=1.37.0, <2"]
@@ -197,7 +197,7 @@ tdengine = [
]
teradata = ["teradatasql>=16.20.0.23"]
thumbnails = [] # deprecated, will be removed in 7.0
vertica = ["sqlalchemy-vertica-python>= 0.5.9, < 0.7"]
vertica = ["sqlalchemy-vertica-python>=0.5.9, < 0.6"]
netezza = ["nzalchemy>=11.0.2"]
starrocks = ["starrocks>=1.0.0"]
doris = ["pydoris>=1.0.0, <2.0.0"]

View File

@@ -115,7 +115,7 @@
"memoize-one": "^5.2.1",
"mousetrap": "^1.6.5",
"mustache": "^4.2.0",
"nanoid": "^5.1.11",
"nanoid": "^5.1.9",
"ol": "^10.9.0",
"pretty-ms": "^9.3.0",
"query-string": "9.3.1",
@@ -249,7 +249,7 @@
"eslint-plugin-no-only-tests": "^3.4.0",
"eslint-plugin-prettier": "^5.5.5",
"eslint-plugin-react-prefer-function-component": "^5.0.0",
"eslint-plugin-react-you-might-not-need-an-effect": "^0.10.0",
"eslint-plugin-react-you-might-not-need-an-effect": "^0.9.3",
"eslint-plugin-storybook": "^0.8.0",
"eslint-plugin-testing-library": "^7.16.2",
"eslint-plugin-theme-colors": "file:eslint-rules/eslint-plugin-theme-colors",
@@ -264,7 +264,7 @@
"jest-html-reporter": "^4.4.0",
"jest-websocket-mock": "^2.5.0",
"js-yaml-loader": "^1.2.2",
"jsdom": "^29.1.1",
"jsdom": "^29.1.0",
"lerna": "^9.0.4",
"lightningcss": "^1.32.0",
"mini-css-extract-plugin": "^2.10.2",
@@ -22872,9 +22872,9 @@
"license": "MIT"
},
"node_modules/eslint-plugin-react-you-might-not-need-an-effect": {
"version": "0.10.0",
"resolved": "https://registry.npmjs.org/eslint-plugin-react-you-might-not-need-an-effect/-/eslint-plugin-react-you-might-not-need-an-effect-0.10.0.tgz",
"integrity": "sha512-a4pugbQc2zLiE2NZGuXdTjtMNvlP2984QFPDv71eskUYDzigLFYfBL4QjK+RnRtcboHoXRKOcQqEZKxiK6KegA==",
"version": "0.9.3",
"resolved": "https://registry.npmjs.org/eslint-plugin-react-you-might-not-need-an-effect/-/eslint-plugin-react-you-might-not-need-an-effect-0.9.3.tgz",
"integrity": "sha512-44cce7LndBnpDRWBTQ8p7ircIdl2rJBP5+V9Ik64E935UB47uA9ZMU1Uv160lAMhtvoPYqXBjQ+tojr5JF3mFQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -31681,9 +31681,9 @@
}
},
"node_modules/jsdom": {
"version": "29.1.1",
"resolved": "https://registry.npmjs.org/jsdom/-/jsdom-29.1.1.tgz",
"integrity": "sha512-ECi4Fi2f7BdJtUKTflYRTiaMxIB0O6zfR1fX0GXpUrf6flp8QIYn1UT20YQqdSOfk2dfkCwS8LAFoJDEppNK5Q==",
"version": "29.1.0",
"resolved": "https://registry.npmjs.org/jsdom/-/jsdom-29.1.0.tgz",
"integrity": "sha512-YNUc7fB9QuvSSQWfrH0xF+TyABkxUwx8sswgIDaCrw4Hol8BghdZDkITtZheRJeMtzWlnTfsM3bBBusRvpO1wg==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -36703,9 +36703,9 @@
}
},
"node_modules/nanoid": {
"version": "5.1.11",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.11.tgz",
"integrity": "sha512-v+KEsUv2ps74PaSKv0gHTxTCgMXOIfBEbaqa6w6ISIGC7ZsvHN4N9oJ8d4cmf0n5oTzQz2SLmThbQWhjd/8eKg==",
"version": "5.1.9",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.9.tgz",
"integrity": "sha512-ZUvP7KeBLe3OZ1ypw6dI/TzYJuvHP77IM4Ry73waSQTLn8/g8rpdjfyVAh7t1/+FjBtG4lCP42MEbDxOsRpBMw==",
"funding": [
{
"type": "github",
@@ -50575,7 +50575,7 @@
"classnames": "^2.5.1",
"d3-array": "^3.2.4",
"lodash": "^4.18.1",
"memoize-one": "^6.0.0",
"memoize-one": "^5.2.1",
"react-table": "^7.8.0",
"regenerator-runtime": "^0.14.1",
"xss": "^1.0.15"
@@ -50606,12 +50606,6 @@
"node": ">=12"
}
},
"plugins/plugin-chart-ag-grid-table/node_modules/memoize-one": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/memoize-one/-/memoize-one-6.0.0.tgz",
"integrity": "sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==",
"license": "MIT"
},
"plugins/plugin-chart-cartodiagram": {
"name": "@superset-ui/plugin-chart-cartodiagram",
"version": "0.0.1",
@@ -50893,7 +50887,7 @@
"@deck.gl/extensions": "~9.2.9",
"@deck.gl/geo-layers": "~9.2.5",
"@deck.gl/layers": "~9.2.5",
"@deck.gl/mapbox": "^9.3.2",
"@deck.gl/mapbox": "~9.3.1",
"@deck.gl/mesh-layers": "~9.2.5",
"@luma.gl/constants": "~9.2.5",
"@luma.gl/core": "~9.2.5",
@@ -50941,16 +50935,16 @@
}
},
"plugins/preset-chart-deckgl/node_modules/@deck.gl/mapbox": {
"version": "9.3.2",
"resolved": "https://registry.npmjs.org/@deck.gl/mapbox/-/mapbox-9.3.2.tgz",
"integrity": "sha512-+T9pJwsOXwjUxyGN6oiBMfIs28VtDIG1V1Rqz4qqn4TjjNEFFw+xO0olJIg8FO5IAqw2OtePdsrMj0tX8tHdGQ==",
"version": "9.3.1",
"resolved": "https://registry.npmjs.org/@deck.gl/mapbox/-/mapbox-9.3.1.tgz",
"integrity": "sha512-4SgpWMeZiqiZEiz9yPdr89cVRL8HFcvXLxXUA0ExhMreUdNuK/j2OIQHPhw6vp1xCFbJEEqRelQ0pJYkhGDkYw==",
"license": "MIT",
"dependencies": {
"@math.gl/web-mercator": "^4.1.0"
},
"peerDependencies": {
"@deck.gl/core": "~9.3.0",
"@luma.gl/core": "~9.3.3",
"@luma.gl/core": "~9.3.2",
"@math.gl/web-mercator": "^4.1.0"
}
},

View File

@@ -196,7 +196,7 @@
"memoize-one": "^5.2.1",
"mousetrap": "^1.6.5",
"mustache": "^4.2.0",
"nanoid": "^5.1.11",
"nanoid": "^5.1.9",
"ol": "^10.9.0",
"pretty-ms": "^9.3.0",
"query-string": "9.3.1",
@@ -330,7 +330,7 @@
"eslint-plugin-no-only-tests": "^3.4.0",
"eslint-plugin-prettier": "^5.5.5",
"eslint-plugin-react-prefer-function-component": "^5.0.0",
"eslint-plugin-react-you-might-not-need-an-effect": "^0.10.0",
"eslint-plugin-react-you-might-not-need-an-effect": "^0.9.3",
"eslint-plugin-storybook": "^0.8.0",
"eslint-plugin-testing-library": "^7.16.2",
"eslint-plugin-theme-colors": "file:eslint-rules/eslint-plugin-theme-colors",
@@ -345,7 +345,7 @@
"jest-html-reporter": "^4.4.0",
"jest-websocket-mock": "^2.5.0",
"js-yaml-loader": "^1.2.2",
"jsdom": "^29.1.1",
"jsdom": "^29.1.0",
"lerna": "^9.0.4",
"lightningcss": "^1.32.0",
"mini-css-extract-plugin": "^2.10.2",

View File

@@ -18,7 +18,6 @@
*/
import { isMatrixifyVisible } from './matrixifyControls';
import type { ControlStateMapping } from '../types';
/**
* Helper to build a controls object matching the shape used by
@@ -26,7 +25,7 @@ import type { ControlStateMapping } from '../types';
*/
function makeControls(
overrides: Record<string, unknown> = {},
): ControlStateMapping {
): Record<string, { value: unknown }> {
const defaults: Record<string, unknown> = {
matrixify_enable: false,
matrixify_mode_rows: 'disabled',
@@ -37,7 +36,7 @@ function makeControls(
const merged = { ...defaults, ...overrides };
return Object.fromEntries(
Object.entries(merged).map(([k, v]) => [k, { value: v }]),
) as ControlStateMapping;
);
}
// ── matrixify_enable guard ──────────────────────────────────────────

View File

@@ -20,7 +20,7 @@
import { t } from '@apache-superset/core/translation';
import { validateNonEmpty } from '@superset-ui/core';
import { ControlStateMapping, SharedControlConfig } from '../types';
import { SharedControlConfig } from '../types';
import { dndAdhocMetricControl } from './dndControls';
import { defineSavedMetrics } from '../utils';
@@ -29,12 +29,9 @@ import { defineSavedMetrics } from '../utils';
* Controls for transforming charts into matrix/grid layouts
*/
// Utility function to check if matrixify controls should be visible.
// Controls both visibility callbacks and validator injection via mapStateToProps.
// The matrixify_enable guard prevents hidden validators from firing on
// pre-revamp charts with stale matrixify_mode defaults (fix for #38519).
// Utility function to check if matrixify controls should be visible
const isMatrixifyVisible = (
controls: ControlStateMapping | undefined,
controls: any,
axis: 'rows' | 'columns',
mode?: 'metrics' | 'dimensions',
selectionMode?: 'members' | 'topn' | 'all',

View File

@@ -1,238 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Tests for the matrixify_enable guard in isMatrixifyVisible() and
* validator injection via mapStateToProps on real matrixify control definitions.
*
* These are TDD tests for the fix to apache/superset#38519 regression:
* isMatrixifyVisible() must check matrixify_enable before evaluating mode,
* otherwise pre-revamp charts with stale matrixify_mode defaults trigger
* hidden validators that block save.
*/
import {
matrixifyControls,
isMatrixifyVisible,
} from '../../src/shared-controls/matrixifyControls';
import type { ControlPanelState, ControlStateMapping } from '../../src/types';
// Helper: build a minimal controls object for ControlPanelState
const buildControls = (
overrides: Record<string, any> = {},
): ControlStateMapping => {
const controls: Record<string, { value: any }> = {};
Object.entries(overrides).forEach(([key, value]) => {
controls[key] = { value };
});
return controls as ControlStateMapping;
};
// Helper: build a minimal ControlPanelState for mapStateToProps.
// Only provides fields that isMatrixifyVisible and mapStateToProps actually read.
const buildState = (
controlValues: Record<string, any> = {},
formData: Record<string, any> = {},
) =>
({
controls: buildControls(controlValues),
datasource: { columns: [], type: 'table' },
form_data: formData,
common: {},
metadata: {},
slice: { slice_id: 0 },
}) as unknown as ControlPanelState;
// ============================================================
// Validator injection tests via real mapStateToProps (rows)
// ============================================================
// --- matrixify_dimension_rows ---
test('matrixify_dimension_rows: validators empty when matrixify_enable is falsy', () => {
const control = matrixifyControls.matrixify_dimension_rows;
const state = buildState(
{
matrixify_enable: undefined,
matrixify_mode_rows: 'dimensions',
matrixify_dimension_selection_mode_rows: 'members',
},
{ matrixify_mode_rows: 'dimensions' },
);
const result = control.mapStateToProps!(state, {} as any);
expect(result.validators).toEqual([]);
});
test('matrixify_dimension_rows: validators present when matrixify_enable is true', () => {
const control = matrixifyControls.matrixify_dimension_rows;
const state = buildState(
{
matrixify_enable: true,
matrixify_mode_rows: 'dimensions',
matrixify_dimension_selection_mode_rows: 'members',
},
{ matrixify_mode_rows: 'dimensions' },
);
const result = control.mapStateToProps!(state, {} as any);
expect(result.validators.length).toBeGreaterThan(0);
});
// --- matrixify_topn_value_rows ---
test('matrixify_topn_value_rows: validators empty when matrixify_enable is falsy', () => {
const control = matrixifyControls.matrixify_topn_value_rows;
const state = buildState(
{
matrixify_enable: undefined,
matrixify_mode_rows: 'dimensions',
matrixify_dimension_selection_mode_rows: 'topn',
},
{ matrixify_mode_rows: 'dimensions' },
);
const result = control.mapStateToProps!(state, {} as any);
expect(result.validators).toEqual([]);
});
test('matrixify_topn_value_rows: validators present when matrixify_enable is true', () => {
const control = matrixifyControls.matrixify_topn_value_rows;
const state = buildState(
{
matrixify_enable: true,
matrixify_mode_rows: 'dimensions',
matrixify_dimension_selection_mode_rows: 'topn',
},
{ matrixify_mode_rows: 'dimensions' },
);
const result = control.mapStateToProps!(state, {} as any);
expect(result.validators.length).toBeGreaterThan(0);
});
// --- matrixify_topn_metric_rows ---
test('matrixify_topn_metric_rows: validators empty when matrixify_enable is falsy', () => {
const control = matrixifyControls.matrixify_topn_metric_rows;
const state = buildState(
{
matrixify_enable: undefined,
matrixify_mode_rows: 'dimensions',
matrixify_dimension_selection_mode_rows: 'topn',
},
{ matrixify_mode_rows: 'dimensions' },
);
const result = control.mapStateToProps!(state, {} as any);
expect(result.validators).toEqual([]);
});
test('matrixify_topn_metric_rows: validators present when matrixify_enable is true', () => {
const control = matrixifyControls.matrixify_topn_metric_rows;
const state = buildState(
{
matrixify_enable: true,
matrixify_mode_rows: 'dimensions',
matrixify_dimension_selection_mode_rows: 'topn',
},
{ matrixify_mode_rows: 'dimensions' },
);
const result = control.mapStateToProps!(state, {} as any);
expect(result.validators.length).toBeGreaterThan(0);
});
// ============================================================
// Validator injection tests via real mapStateToProps (columns)
// ============================================================
test('matrixify_dimension_columns: validators empty when matrixify_enable is falsy', () => {
const control = matrixifyControls.matrixify_dimension_columns;
const state = buildState(
{
matrixify_enable: undefined,
matrixify_mode_columns: 'dimensions',
matrixify_dimension_selection_mode_columns: 'members',
},
{ matrixify_mode_columns: 'dimensions' },
);
const result = control.mapStateToProps!(state, {} as any);
expect(result.validators).toEqual([]);
});
test('matrixify_dimension_columns: validators present when matrixify_enable is true', () => {
const control = matrixifyControls.matrixify_dimension_columns;
const state = buildState(
{
matrixify_enable: true,
matrixify_mode_columns: 'dimensions',
matrixify_dimension_selection_mode_columns: 'members',
},
{ matrixify_mode_columns: 'dimensions' },
);
const result = control.mapStateToProps!(state, {} as any);
expect(result.validators.length).toBeGreaterThan(0);
});
// ============================================================
// Direct isMatrixifyVisible guard tests
// ============================================================
test.each([
['undefined', undefined],
['null', null],
['false', false],
['0', 0],
])(
'isMatrixifyVisible returns false when matrixify_enable is %s',
(_, value) => {
const controls = buildControls({
matrixify_enable: value,
matrixify_mode_rows: 'dimensions',
});
expect(isMatrixifyVisible(controls, 'rows')).toBe(false);
},
);
test('isMatrixifyVisible returns true when matrixify_enable is true and mode matches', () => {
const controls = buildControls({
matrixify_enable: true,
matrixify_mode_rows: 'dimensions',
});
expect(isMatrixifyVisible(controls, 'rows', 'dimensions')).toBe(true);
});
test('isMatrixifyVisible returns false when matrixify_enable is true but mode is disabled', () => {
const controls = buildControls({
matrixify_enable: true,
matrixify_mode_rows: 'disabled',
});
expect(isMatrixifyVisible(controls, 'rows')).toBe(false);
});
test('isMatrixifyVisible returns true when matrixify_enable is true and any non-disabled mode (no mode filter)', () => {
const controls = buildControls({
matrixify_enable: true,
matrixify_mode_columns: 'metrics',
});
expect(isMatrixifyVisible(controls, 'columns')).toBe(true);
});

View File

@@ -29,7 +29,7 @@
"classnames": "^2.5.1",
"d3-array": "^3.2.4",
"lodash": "^4.18.1",
"memoize-one": "^6.0.0",
"memoize-one": "^5.2.1",
"react-table": "^7.8.0",
"regenerator-runtime": "^0.14.1",
"xss": "^1.0.15"

View File

@@ -1,99 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Regression coverage for memoize-one v6 adoption.
*
* memoize-one v6 changed the signature of the (optional) custom `isEqual`
* callback from per-argument `(a, b) => bool` to arg-array
* `(newArgs, lastArgs) => bool`. Of the four memoizeOne callsites in
* `src/transformProps.ts` (`processComparisonDataRecords`,
* `processDataRecords`, `processColumns`, `getBasicColorFormatter`), only
* `processColumns` passes a custom comparator (`isEqualColumns`); its
* signature already takes arg-arrays and is compatible with v6. The other
* three rely on memoize-one's default referential-equality comparator, which
* is unchanged between v5 and v6.
*
* These tests lock those assumptions in by observing the memoization
* behavior through the public `transformProps` API: identical chart-props
* input references should produce referentially-equal `data` and `columns`
* arrays (cache hit), while inputs that differ on the sub-fields each
* memoizer actually compares should produce fresh arrays (cache miss).
*/
import transformProps from '../src/transformProps';
import testData from '../../plugin-chart-table/test/testData';
test('transformProps returns referentially-equal data/columns on identical input (cache hit)', () => {
// processColumns and processDataRecords are both wrapped by memoizeOne at
// module scope. Two consecutive calls with the same chartProps reference
// should hit both caches and yield the same output references.
const first = transformProps(testData.basic);
const second = transformProps(testData.basic);
expect(second.columns).toBe(first.columns);
expect(second.data).toBe(first.data);
});
test('transformProps busts its memoization caches when sub-field inputs change (cache miss)', () => {
const first = transformProps(testData.basic);
// `processColumns` is wrapped with a custom equality (`isEqualColumns`) that
// compares specific chartProps sub-fields by identity — mutating only the
// top-level props reference is NOT enough to bust it. Here we supply a fresh
// `datasource.columnFormats` reference, which `isEqualColumns` compares with
// `===`, forcing `processColumns` to recompute and return a new `columns`
// array.
//
// `processDataRecords` uses memoize-one's default referential equality on
// `(data, columns)`. We also hand it a fresh `queriesData[0].data` array, so
// together with the recomputed `columns` reference it too cache-misses.
const freshProps = {
...testData.basic,
datasource: {
...testData.basic.datasource,
columnFormats: {},
},
queriesData: [
{
...testData.basic.queriesData[0],
data: [...(testData.basic.queriesData[0].data || [])],
},
],
};
const second = transformProps(freshProps);
expect(second.columns).not.toBe(first.columns);
expect(second.data).not.toBe(first.data);
});
test('transformProps memoizes the comparison-mode data pipeline on identical input', () => {
// Exercises `processComparisonDataRecords` (the third of four memoizeOne
// callsites in transformProps.ts) via the `comparison` fixture, which has
// `time_compare` set and therefore flows through the comparison branch
// where `passedData = comparisonData`.
//
// Note: we don't assert reference equality on `columns` here because the
// comparison branch runs `comparisonColumns` through the non-memoized
// `processComparisonColumns` helper, which returns a fresh array on each
// call by design.
const first = transformProps(testData.comparison);
const second = transformProps(testData.comparison);
expect(second.data).toBe(first.data);
});

View File

@@ -29,7 +29,7 @@
"@deck.gl/extensions": "~9.2.9",
"@deck.gl/geo-layers": "~9.2.5",
"@deck.gl/layers": "~9.2.5",
"@deck.gl/mapbox": "~9.3.2",
"@deck.gl/mapbox": "~9.3.1",
"@deck.gl/mesh-layers": "~9.2.5",
"@luma.gl/constants": "~9.2.5",
"@luma.gl/core": "~9.2.5",

View File

@@ -24,7 +24,7 @@ import {
ExplorePageState,
} from 'src/explore/types';
import { getChartKey } from 'src/explore/exploreUtils';
import { getControlsState, handleDeprecatedControls } from 'src/explore/store';
import { getControlsState } from 'src/explore/store';
import { Dispatch } from 'redux';
import {
Currency,
@@ -116,12 +116,6 @@ export const hydrateExplore =
]),
);
// Normalize deprecated controls (e.g., migrate old per-axis matrixify
// flags to matrixify_enable) before form_data is stored in Redux state.
// getControlsState also calls this on its own copy, but state.form_data
// must reflect the same migration so the two stay consistent.
handleDeprecatedControls(initialFormData);
const initialExploreState = {
form_data: initialFormData,
slice: initialSlice,

View File

@@ -17,359 +17,55 @@
* under the License.
*/
import { getChartControlPanelRegistry } from '@superset-ui/core';
import {
applyDefaultFormData,
getControlsState,
handleDeprecatedControls,
} from 'src/explore/store';
import { applyDefaultFormData } from 'src/explore/store';
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(window as any).featureFlags = {};
// eslint-disable-next-line no-restricted-globals -- TODO: Migrate from describe blocks
describe('store', () => {
beforeAll(() => {
getChartControlPanelRegistry().registerValue('test-chart', {
controlPanelSections: [
{
label: 'Test section',
expanded: true,
controlSetRows: [['row_limit']],
},
],
});
});
beforeAll(() => {
getChartControlPanelRegistry().registerValue('test-chart', {
controlPanelSections: [
{
label: 'Test section',
expanded: true,
controlSetRows: [['row_limit']],
},
],
afterAll(() => {
getChartControlPanelRegistry().remove('test-chart');
});
// eslint-disable-next-line no-restricted-globals -- TODO: Migrate from describe blocks
describe('applyDefaultFormData', () => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(window as any).featureFlags = {};
test('applies default to formData if the key is missing', () => {
const inputFormData = {
datasource: '11_table',
viz_type: 'test-chart',
};
let outputFormData = applyDefaultFormData(inputFormData);
expect(outputFormData.row_limit).toEqual(10000);
const inputWithRowLimit = {
...inputFormData,
row_limit: 888,
};
outputFormData = applyDefaultFormData(inputWithRowLimit);
expect(outputFormData.row_limit).toEqual(888);
});
test('keeps null if key is defined with null', () => {
const inputFormData = {
datasource: '11_table',
viz_type: 'test-chart',
row_limit: null,
};
const outputFormData = applyDefaultFormData(inputFormData);
expect(outputFormData.row_limit).toBe(null);
});
});
});
afterAll(() => {
getChartControlPanelRegistry().remove('test-chart');
});
// Helper: build ExploreState for getControlsState
const buildExploreState = (controlOverrides: Record<string, any> = {}) => ({
datasource: { type: 'table' },
controls: Object.fromEntries(
Object.entries(controlOverrides).map(([k, v]) => [k, { value: v }]),
),
});
// ============================================================
// Existing applyDefaultFormData tests
// ============================================================
test('applyDefaultFormData applies default to formData if the key is missing', () => {
const inputFormData = {
datasource: '11_table',
viz_type: 'test-chart',
};
let outputFormData = applyDefaultFormData(inputFormData);
expect(outputFormData.row_limit).toEqual(10000);
const inputWithRowLimit = {
...inputFormData,
row_limit: 888,
};
outputFormData = applyDefaultFormData(inputWithRowLimit);
expect(outputFormData.row_limit).toEqual(888);
});
test('applyDefaultFormData keeps null if key is defined with null', () => {
const inputFormData = {
datasource: '11_table',
viz_type: 'test-chart',
row_limit: null,
};
const outputFormData = applyDefaultFormData(inputFormData);
expect(outputFormData.row_limit).toBe(null);
});
// ============================================================
// Migration tests: handleDeprecatedControls normalizes stale matrixify modes
// (fix for apache/superset#38519 regression — guards validators AND
// downstream UI consumers that infer matrixify state from mode values)
// ============================================================
test('getControlsState resets stale matrixify_mode_rows to disabled when matrixify_enable key absent', () => {
const state = buildExploreState();
const formData = {
datasource: '1__table',
viz_type: 'test-chart',
matrixify_mode_rows: 'dimensions', // stale pre-revamp default
};
const result = getControlsState(state as any, formData as any);
const modeControl = result.matrixify_mode_rows as any;
expect(modeControl?.value).toBe('disabled');
});
test('getControlsState resets stale matrixify_mode_columns to disabled when matrixify_enable key absent', () => {
const state = buildExploreState();
const formData = {
datasource: '1__table',
viz_type: 'test-chart',
matrixify_mode_columns: 'metrics', // stale pre-revamp default
};
const result = getControlsState(state as any, formData as any);
const modeControl = result.matrixify_mode_columns as any;
expect(modeControl?.value).toBe('disabled');
});
test('getControlsState preserves matrixify mode values when matrixify_enable is true', () => {
const state = buildExploreState();
const formData = {
datasource: '1__table',
viz_type: 'test-chart',
matrixify_enable: true,
matrixify_mode_rows: 'dimensions',
};
const result = getControlsState(state as any, formData as any);
const modeControl = result.matrixify_mode_rows as any;
expect(modeControl?.value).toBe('dimensions');
});
test('getControlsState preserves matrixify mode values when matrixify_enable is explicitly false', () => {
const state = buildExploreState();
const formData = {
datasource: '1__table',
viz_type: 'test-chart',
matrixify_enable: false,
matrixify_mode_rows: 'dimensions',
};
const result = getControlsState(state as any, formData as any);
const modeControl = result.matrixify_mode_rows as any;
// matrixify_enable key IS present (just false) — migration does NOT fire
expect(modeControl?.value).toBe('dimensions');
});
test('getControlsState is idempotent when matrixify modes already disabled', () => {
const state = buildExploreState();
const formData = {
datasource: '1__table',
viz_type: 'test-chart',
matrixify_mode_rows: 'disabled',
matrixify_mode_columns: 'disabled',
};
const result = getControlsState(state as any, formData as any);
expect((result.matrixify_mode_rows as any)?.value).toBe('disabled');
expect((result.matrixify_mode_columns as any)?.value).toBe('disabled');
});
test('getControlsState handles form_data with no matrixify keys', () => {
const state = buildExploreState();
const formData = {
datasource: '1__table',
viz_type: 'test-chart',
};
const result = getControlsState(state as any, formData as any);
// Controls should get their defaults — matrixify_mode defaults to 'disabled'
expect((result.matrixify_mode_rows as any)?.value).toBe('disabled');
expect((result.matrixify_mode_columns as any)?.value).toBe('disabled');
});
test('getControlsState round-trip: pre-revamp form_data produces no matrixify validation errors', () => {
// Simulate a chart saved before #38519 with stale matrixify defaults
// Empty controls: on real first-load hydration, no pre-existing controls exist
const state = buildExploreState();
const preRevampFormData = {
datasource: '1__table',
viz_type: 'test-chart',
// Stale old defaults — no matrixify_enable key (legacy chart)
matrixify_mode_rows: 'dimensions',
matrixify_mode_columns: 'metrics',
};
const result = getControlsState(state as any, preRevampFormData as any);
// Every matrixify control should have zero validation errors
const matrixifyControlEntries = Object.entries(result).filter(([name]) =>
name.startsWith('matrixify_'),
);
const controlsWithErrors = matrixifyControlEntries.filter(
([, control]) => (control as any)?.validationErrors?.length > 0,
);
expect(controlsWithErrors).toEqual([]);
});
// ============================================================
// Dashboard hydration: applyDefaultFormData with stale form_data
// ============================================================
test('applyDefaultFormData normalizes stale matrixify modes for legacy charts', () => {
  // handleDeprecatedControls now runs on the dashboard-hydration path too, so
  // stale matrixify modes from pre-revamp charts are reset to 'disabled'.
  // This matters because downstream consumers (ChartContextMenu,
  // DrillBySubmenu, ChartRenderer) infer "matrixify is active" from the mode
  // values alone.
  const legacyFormData = {
    datasource: '1__table',
    viz_type: 'test-chart',
    matrixify_mode_rows: 'dimensions',
    matrixify_mode_columns: 'metrics',
    // Legacy chart that never used matrixify — no matrixify_enable key.
  };
  const hydrated = applyDefaultFormData(legacyFormData as any);
  // Stale modes are normalized and the new flag is pinned to false.
  expect(hydrated.matrixify_mode_rows).toBe('disabled');
  expect(hydrated.matrixify_mode_columns).toBe('disabled');
  expect(hydrated.matrixify_enable).toBe(false);
});
// ============================================================
// P1: Pre-revamp charts that actually used matrixify via old per-axis flags
// (matrixify_enable_vertical_layout / matrixify_enable_horizontal_layout)
// ============================================================
test('getControlsState preserves modes and sets matrixify_enable when old vertical flag is true', () => {
const state = buildExploreState();
const formData = {
datasource: '1__table',
viz_type: 'test-chart',
matrixify_enable_vertical_layout: true,
matrixify_mode_rows: 'dimensions',
matrixify_mode_columns: 'metrics',
};
const result = getControlsState(state as any, formData as any);
// Vertical layout was enabled — rows mode preserved, matrixify_enable migrated
expect((result.matrixify_mode_rows as any)?.value).toBe('dimensions');
expect((result.matrixify_enable as any)?.value).toBe(true);
// Horizontal layout was NOT enabled — columns mode reset
expect((result.matrixify_mode_columns as any)?.value).toBe('disabled');
});
test('getControlsState preserves modes and sets matrixify_enable when old horizontal flag is true', () => {
const state = buildExploreState();
const formData = {
datasource: '1__table',
viz_type: 'test-chart',
matrixify_enable_horizontal_layout: true,
matrixify_mode_rows: 'dimensions',
matrixify_mode_columns: 'metrics',
};
const result = getControlsState(state as any, formData as any);
// Horizontal layout was enabled — columns mode preserved, matrixify_enable migrated
expect((result.matrixify_mode_columns as any)?.value).toBe('metrics');
expect((result.matrixify_enable as any)?.value).toBe(true);
// Vertical layout was NOT enabled — rows mode reset
expect((result.matrixify_mode_rows as any)?.value).toBe('disabled');
});
test('getControlsState preserves both modes when both old per-axis flags are true', () => {
const state = buildExploreState();
const formData = {
datasource: '1__table',
viz_type: 'test-chart',
matrixify_enable_vertical_layout: true,
matrixify_enable_horizontal_layout: true,
matrixify_mode_rows: 'dimensions',
matrixify_mode_columns: 'metrics',
};
const result = getControlsState(state as any, formData as any);
expect((result.matrixify_mode_rows as any)?.value).toBe('dimensions');
expect((result.matrixify_mode_columns as any)?.value).toBe('metrics');
expect((result.matrixify_enable as any)?.value).toBe(true);
});
test('getControlsState resets modes when old per-axis flags are explicitly false', () => {
const state = buildExploreState();
const formData = {
datasource: '1__table',
viz_type: 'test-chart',
matrixify_enable_vertical_layout: false,
matrixify_enable_horizontal_layout: false,
matrixify_mode_rows: 'dimensions',
matrixify_mode_columns: 'metrics',
};
const result = getControlsState(state as any, formData as any);
// Old flags present but false — chart never used matrixify, reset stale modes
expect((result.matrixify_mode_rows as any)?.value).toBe('disabled');
expect((result.matrixify_mode_columns as any)?.value).toBe('disabled');
});
// ============================================================
// P2: Dashboard hydration (applyDefaultFormData) with old per-axis flags
// ============================================================
test('applyDefaultFormData preserves modes when old vertical flag is true', () => {
  const fd = {
    datasource: '1__table',
    viz_type: 'test-chart',
    matrixify_enable_vertical_layout: true,
    matrixify_mode_rows: 'dimensions',
    matrixify_mode_columns: 'metrics',
  };
  const hydrated = applyDefaultFormData(fd as any);
  // Vertical axis was in use: rows mode kept, new single flag migrated on.
  expect(hydrated.matrixify_mode_rows).toBe('dimensions');
  expect(hydrated.matrixify_enable).toBe(true);
  // Horizontal axis was never enabled, so its stale mode is reset.
  expect(hydrated.matrixify_mode_columns).toBe('disabled');
});
test('applyDefaultFormData preserves modes when both old flags are true', () => {
  const fd = {
    datasource: '1__table',
    viz_type: 'test-chart',
    matrixify_enable_vertical_layout: true,
    matrixify_enable_horizontal_layout: true,
    matrixify_mode_rows: 'dimensions',
    matrixify_mode_columns: 'metrics',
  };
  const hydrated = applyDefaultFormData(fd as any);
  // Both axes were in use: both modes survive and the new flag migrates on.
  expect(hydrated.matrixify_mode_rows).toBe('dimensions');
  expect(hydrated.matrixify_mode_columns).toBe('metrics');
  expect(hydrated.matrixify_enable).toBe(true);
});
// ============================================================
// Direct handleDeprecatedControls tests: verify form_data mutation
// so callers (hydrateExplore) can propagate migrated fields into state
// ============================================================
test('handleDeprecatedControls sets matrixify_enable on form_data when old vertical flag is true', () => {
  const fd: any = {
    matrixify_enable_vertical_layout: true,
    matrixify_mode_rows: 'dimensions',
    matrixify_mode_columns: 'metrics',
  };
  handleDeprecatedControls(fd);
  // Vertical axis was in use: new flag set in place, rows mode kept.
  expect(fd.matrixify_enable).toBe(true);
  expect(fd.matrixify_mode_rows).toBe('dimensions');
  // Horizontal axis was never enabled, so its stale mode is reset.
  expect(fd.matrixify_mode_columns).toBe('disabled');
});
test('handleDeprecatedControls resets modes when no matrixify_enable and no old flags', () => {
  const fd: any = {
    matrixify_mode_rows: 'dimensions',
    matrixify_mode_columns: 'metrics',
  };
  handleDeprecatedControls(fd);
  // Chart never used matrixify: both stale modes reset, flag left unset.
  expect(fd.matrixify_enable).toBeUndefined();
  expect(fd.matrixify_mode_rows).toBe('disabled');
  expect(fd.matrixify_mode_columns).toBe('disabled');
});
test('handleDeprecatedControls is idempotent — no-op when matrixify_enable already present', () => {
  const fd: any = {
    matrixify_enable: true,
    matrixify_mode_rows: 'dimensions',
    matrixify_mode_columns: 'metrics',
  };
  handleDeprecatedControls(fd);
  // Presence of the matrixify_enable key short-circuits the migration, so
  // nothing is mutated.
  expect(fd.matrixify_enable).toBe(true);
  expect(fd.matrixify_mode_rows).toBe('dimensions');
  expect(fd.matrixify_mode_columns).toBe('metrics');
});

View File

@@ -41,16 +41,9 @@ type FormData = QueryFormData & {
y_axis_zero?: boolean;
y_axis_bounds?: [number | null, number | null];
datasource?: string;
matrixify_enable?: boolean;
matrixify_mode_rows?: string;
matrixify_mode_columns?: string;
// Pre-revamp per-axis enable flags (removed in #38519, may still exist in
// persisted form_data for charts that actually used matrixify)
matrixify_enable_vertical_layout?: boolean;
matrixify_enable_horizontal_layout?: boolean;
};
export function handleDeprecatedControls(formData: FormData): void {
function handleDeprecatedControls(formData: FormData): void {
// Reaffectation / handling of deprecated controls
/* eslint-disable no-param-reassign */
@@ -58,37 +51,6 @@ export function handleDeprecatedControls(formData: FormData): void {
if (formData.y_axis_zero) {
formData.y_axis_bounds = [0, null];
}
// #38519: migrate pre-revamp matrixify controls to the new single-toggle
// system. Before the revamp, per-axis enable flags
// (matrixify_enable_vertical_layout / matrixify_enable_horizontal_layout)
// gated visibility, and matrixify_mode_rows/columns defaulted to
// non-disabled values ('dimensions'/'metrics'). The revamp replaced those
// with a single matrixify_enable toggle and mode default 'disabled'.
//
// Charts that actually used matrixify pre-revamp have the old per-axis
// flags set to true — we must preserve their modes and set
// matrixify_enable: true. Charts that never used matrixify (or predate it)
// need stale mode defaults reset to 'disabled' because 4 downstream UI
// consumers (ExploreChartPanel, ChartContextMenu, DrillBySubmenu,
// ChartRenderer) infer "matrixify is active" from mode values alone.
if (!('matrixify_enable' in formData)) {
const hadVerticalLayout =
formData.matrixify_enable_vertical_layout === true;
const hadHorizontalLayout =
formData.matrixify_enable_horizontal_layout === true;
if (hadVerticalLayout || hadHorizontalLayout) {
// Pre-revamp chart that genuinely used matrixify — migrate to new flag
formData.matrixify_enable = true;
if (!hadVerticalLayout) formData.matrixify_mode_rows = 'disabled';
if (!hadHorizontalLayout) formData.matrixify_mode_columns = 'disabled';
} else {
// Never used matrixify — reset stale defaults
formData.matrixify_mode_rows = 'disabled';
formData.matrixify_mode_columns = 'disabled';
}
}
}
export function getControlsState(
@@ -127,31 +89,25 @@ export function getControlsState(
export function applyDefaultFormData(
inputFormData: FormData,
): Record<string, unknown> {
// Normalize deprecated controls before building control state — ensures
// stale matrixify modes are cleaned on the dashboard hydration path too,
// not just the explore path (getControlsState).
const cleanedFormData = { ...inputFormData };
handleDeprecatedControls(cleanedFormData);
const datasourceType = cleanedFormData.datasource?.split('__')[1] ?? '';
const vizType = cleanedFormData.viz_type;
const datasourceType = inputFormData.datasource?.split('__')[1] ?? '';
const vizType = inputFormData.viz_type;
const controlsState = getAllControlsState(
vizType,
datasourceType as DatasourceType,
null,
cleanedFormData,
inputFormData,
);
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const controlFormData = getFormDataFromControls(controlsState as any);
const formData: Record<string, unknown> = {};
Object.keys(controlsState)
.concat(Object.keys(cleanedFormData))
.concat(Object.keys(inputFormData))
.forEach(controlName => {
if (cleanedFormData[controlName as keyof FormData] === undefined) {
if (inputFormData[controlName as keyof FormData] === undefined) {
formData[controlName] = controlFormData[controlName];
} else {
formData[controlName] = cleanedFormData[controlName as keyof FormData];
formData[controlName] = inputFormData[controlName as keyof FormData];
}
});

View File

@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
import logging
from collections.abc import Sequence
from datetime import datetime, timedelta
from typing import Any, Optional, Union
from uuid import UUID
@@ -196,7 +197,7 @@ class BaseReportState:
db.session.commit() # pylint: disable=consider-using-transaction
except StaleDataError as ex:
# Report schedule was modified or deleted by another process
db.session.rollback()
db.session.rollback() # pylint: disable=consider-using-transaction
logger.warning(
"Report schedule (execution %s) was modified or deleted "
"during execution. This can occur when a report is deleted "
@@ -280,6 +281,7 @@ class BaseReportState:
)
urls = self._get_tabs_urls(
anchor_list,
dashboard_state=dashboard_state,
native_filter_params=native_filter_params,
user_friendly=user_friendly,
)
@@ -291,12 +293,9 @@ class BaseReportState:
# overwriting — dashboard_state may already have urlParams
# (e.g. standalone=true) that must be preserved.
state: DashboardPermalinkState = {**dashboard_state}
existing_params: list[tuple[str, str]] = state.get("urlParams") or []
merged_params: list[list[str]] = [
list(p) for p in existing_params if p[0] != "native_filters"
]
merged_params.append(["native_filters", native_filter_params or ""])
state["urlParams"] = merged_params # type: ignore[typeddict-item]
state["urlParams"] = self._merge_native_filters_into_url_params(
state.get("urlParams"), native_filter_params
)
return [
self._get_tab_url(
state,
@@ -310,12 +309,17 @@ class BaseReportState:
if filter_warnings:
self._filter_warnings.extend(filter_warnings)
if native_filter_params and native_filter_params != "()":
# Preserve any urlParams from extra.dashboard (e.g. standalone=true)
# set via API even when ALERT_REPORT_TABS is off — same merge
# semantics as the protected branch above.
fallback_state = self._report_schedule.extra.get("dashboard") or {}
return [
self._get_tab_url(
{
"urlParams": [
["native_filters", native_filter_params] # type: ignore
],
"urlParams": self._merge_native_filters_into_url_params(
fallback_state.get("urlParams"),
native_filter_params,
)
},
user_friendly=user_friendly,
)
@@ -353,24 +357,51 @@ class BaseReportState:
user_friendly=user_friendly,
)
@staticmethod
def _merge_native_filters_into_url_params(
existing: Optional[Sequence[Sequence[str]]],
native_filter_params: Optional[str],
) -> list[Sequence[str]]:
"""
Merge the report's ``native_filters`` into a permalink's existing
``urlParams``, deduping any prior ``native_filters`` entry so the
report's value wins. All other params (e.g. ``standalone=true``)
survive in their original order.
"""
merged: list[Sequence[str]] = [
list(p) for p in (existing or []) if p[0] != "native_filters"
]
merged.append(["native_filters", native_filter_params or ""])
return merged
def _get_tabs_urls(
self,
tab_anchors: list[str],
dashboard_state: Optional[DashboardPermalinkState] = None,
native_filter_params: Optional[str] = None,
user_friendly: bool = False,
) -> list[str]:
"""
Get multple tabs urls
Get multiple tabs urls.
Each per-tab permalink merges the report's ``native_filters`` into
the original ``dashboard_state.urlParams`` (deduping any prior
``native_filters`` entry), so params like ``standalone=true`` are
preserved — matching the precedence rules of the single-tab branch
in :meth:`get_dashboard_urls`.
"""
base_state: DashboardPermalinkState = dashboard_state or {}
merged_params = self._merge_native_filters_into_url_params(
base_state.get("urlParams"), native_filter_params
)
return [
self._get_tab_url(
{
"anchor": tab_anchor,
"dataMask": None,
"activeTabs": None,
"urlParams": [
["native_filters", native_filter_params] # type: ignore
],
"urlParams": merged_params,
},
user_friendly=user_friendly,
)

View File

@@ -360,18 +360,11 @@ class QueryObject: # pylint: disable=too-many-instance-attributes
engine = database.db_engine_spec.engine
if needs_transpilation:
# source_engine=engine ensures idempotency: this
# method can run more than once (validate() is called
# from both raise_for_access and get_df_payload), so
# the second pass must be able to re-parse the
# dialect-specific output (e.g. BigQuery backticks)
# produced by the first pass.
clause = transpile_to_dialect(
clause, engine, source_engine=engine, identify=True
)
clause = transpile_to_dialect(clause, engine)
sanitized_clause = sanitize_clause(clause, engine)
self.extras[param] = sanitized_clause
if sanitized_clause != clause:
self.extras[param] = sanitized_clause
except QueryClauseValidationException as ex:
raise QueryObjectValidationError(ex.message) from ex

View File

@@ -14,6 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections.abc import Sequence
from typing import Any, Optional, TypedDict
@@ -21,7 +22,10 @@ class DashboardPermalinkState(TypedDict, total=False):
dataMask: Optional[dict[str, Any]]
activeTabs: Optional[list[str]]
anchor: Optional[str]
urlParams: Optional[list[tuple[str, str]]]
# urlParams items are stored/transmitted as JSON arrays, so they
# arrive at runtime as ``list[str]``; ``Sequence[str]`` keeps the
# annotation permissive of both list and tuple shapes.
urlParams: Optional[list[Sequence[str]]]
chartStates: Optional[dict[str, Any]]

View File

@@ -393,26 +393,17 @@ Used by: `get_chart_info`, `get_chart_preview`, `get_chart_data`, `generate_char
### 11. Compile Check for Chart Creation
When creating, saving, or previewing charts, run schema validation (Tier 1)
and optionally a compile check (Tier 2) before persisting or caching.
``validate_and_compile`` glues both together; tools with tight SLAs
(``generate_explore_link``, ``update_chart_preview``) opt out of Tier 2.
When creating or saving charts, run a compile check to verify the query executes:
```python
from superset.mcp_service.chart.compile import validate_and_compile
from superset.mcp_service.chart.tool.generate_chart import _compile_chart
result = validate_and_compile(
config, form_data, dataset, run_compile_check=True
)
if not result.success:
# ``result.error_obj`` is a ``ChartGenerationError`` with fuzzy-match
# suggestions ("did you mean sum_boys?") so the LLM can self-correct.
compile_result = _compile_chart(form_data, dataset.id)
if not compile_result.success:
# Delete broken chart, return error
...
```
The lower-level ``_compile_chart(form_data, dataset_id)`` is still exported
for callers that have already done their own schema validation.
### 12. Flexible Input Parsing
`ModelListCore` handles JSON string vs. native object parsing automatically via utilities in `superset.mcp_service.utils.schema_utils`:

View File

@@ -1,362 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Shared compile/validation helpers for MCP chart-generating tools.
Two tiers are exposed:
* **Tier 1 — schema validation** (``DatasetValidator.validate_against_dataset``):
cheap, no SQL execution, catches references to columns or metrics that do
not exist in the dataset and returns fuzzy-match suggestions.
* **Tier 2 — compile check** (``_compile_chart``): runs a small (``row_limit=2``)
``ChartDataCommand`` against the underlying database to surface anything Tier
1 cannot catch (incompatible aggregates, virtual-dataset SQL bugs, etc.).
``validate_and_compile`` glues both together so each MCP tool can opt into the
tier(s) appropriate for its SLA.
"""
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from typing import Any, Dict, List, Literal
from superset.commands.exceptions import CommandException
from superset.mcp_service.chart.validation.dataset_validator import DatasetValidator
from superset.mcp_service.common.error_schemas import (
ChartGenerationError,
ColumnSuggestion,
DatasetContext,
)
logger = logging.getLogger(__name__)
@dataclass
class CompileResult:
    """Result of a chart validate-and-compile check.

    ``error_obj`` carries the structured ``ChartGenerationError`` (with
    suggestions, dataset context, etc.) that callers should embed in their
    response envelope so LLM clients can self-correct. ``error`` retains the
    plain-string form for backwards compatibility with existing call sites.
    """

    # True when all requested tiers passed.
    success: bool
    # Plain-string failure description (legacy form of ``error_obj``).
    error: str | None = None
    # Machine-readable code, e.g. "CHART_VALIDATION_FAILED" / "CHART_COMPILE_FAILED".
    error_code: str | None = None
    # Which tier produced the failure: schema validation vs. live compile.
    tier: Literal["validation", "compile"] | None = None
    # Structured error with fuzzy-match suggestions; None on success.
    error_obj: ChartGenerationError | None = None
    warnings: List[str] = field(default_factory=list)
    # Number of rows returned by the compile-check query, when it ran.
    row_count: int | None = None
def build_dataset_context_from_orm(dataset: Any) -> DatasetContext | None:
    """Construct a ``DatasetContext`` from an already-fetched ORM dataset.

    Mirrors :py:meth:`DatasetValidator._get_dataset_context` but skips the
    ``DatasetDAO.find_by_id`` round trip. Callers that have already loaded
    the dataset (for permission checks, etc.) should use this instead.

    :param dataset: ORM dataset instance, or ``None``.
    :returns: populated ``DatasetContext``, or ``None`` when *dataset* is
        ``None``.
    """
    if dataset is None:
        return None
    # Comprehensions instead of append loops — same dict shapes as before.
    columns: List[Dict[str, Any]] = [
        {
            "name": col.column_name,
            "type": str(col.type) if col.type else "UNKNOWN",
            "is_temporal": getattr(col, "is_temporal", False),
            "is_numeric": getattr(col, "is_numeric", False),
        }
        for col in getattr(dataset, "columns", []) or []
    ]
    metrics: List[Dict[str, Any]] = [
        {
            "name": metric.metric_name,
            "expression": metric.expression,
            "description": metric.description,
        }
        for metric in getattr(dataset, "metrics", []) or []
    ]
    database = getattr(dataset, "database", None)
    # ``DatasetContext.database_name`` is typed as required ``str``; default to
    # an empty string when the relationship isn't loaded so we don't blow up
    # Pydantic validation. The field is purely informational in error messages.
    database_name = getattr(database, "database_name", None) or ""
    return DatasetContext(
        id=dataset.id,
        table_name=dataset.table_name,
        schema=dataset.schema,
        database_name=database_name,
        available_columns=columns,
        available_metrics=metrics,
    )
def _compile_chart(
    form_data: Dict[str, Any],
    dataset_id: int,
) -> CompileResult:
    """Execute the chart's query to verify it renders without errors.

    Builds a ``QueryContext`` from *form_data* and runs it through
    ``ChartDataCommand``. A small ``row_limit`` is used so the check is
    fast — we only need to know the query compiles and returns data, not
    fetch the full result set.

    :param form_data: chart form data to compile.
    :param dataset_id: id of the dataset to query against.
    :returns: a :class:`CompileResult` with ``success=True`` when the
        query executes cleanly.
    """
    # Local imports keep module import time low and avoid circular imports.
    from superset.commands.chart.data.get_data_command import ChartDataCommand
    from superset.commands.chart.exceptions import (
        ChartDataCacheLoadError,
        ChartDataQueryFailedError,
    )
    from superset.common.query_context_factory import QueryContextFactory
    from superset.mcp_service.chart.chart_utils import adhoc_filters_to_query_filters
    from superset.mcp_service.chart.preview_utils import _build_query_columns

    try:
        columns = _build_query_columns(form_data)
        query_filters = adhoc_filters_to_query_filters(
            form_data.get("adhoc_filters", [])
        )
        # Big Number charts use singular "metric" instead of "metrics"
        metrics = form_data.get("metrics", [])
        if not metrics and form_data.get("metric"):
            metrics = [form_data["metric"]]
        # Big Number with trendline uses granularity_sqla as the time column
        if not columns and form_data.get("granularity_sqla"):
            columns = [form_data["granularity_sqla"]]
        factory = QueryContextFactory()
        query_context = factory.create(
            datasource={"id": dataset_id, "type": "table"},
            queries=[
                {
                    "columns": columns,
                    "metrics": metrics,
                    "orderby": form_data.get("orderby", []),
                    "row_limit": 2,
                    "filters": query_filters,
                    "time_range": form_data.get("time_range", "No filter"),
                }
            ],
            form_data=form_data,
        )
        command = ChartDataCommand(query_context)
        command.validate()
        result = command.run()
        warnings: List[str] = []
        row_count = 0
        for query in result.get("queries", []):
            # Per-query errors don't raise — they come back in the payload.
            if query.get("error"):
                error_str = str(query["error"])
                return CompileResult(
                    success=False,
                    error=error_str,
                    error_code="CHART_COMPILE_FAILED",
                    tier="compile",
                    error_obj=_build_compile_error(error_str),
                )
            row_count += len(query.get("data", []))
        return CompileResult(success=True, warnings=warnings, row_count=row_count)
    except (
        ChartDataQueryFailedError,
        ChartDataCacheLoadError,
        CommandException,
        ValueError,
        KeyError,
    ) as exc:
        # The original two handlers had byte-identical bodies; one tuple-catch
        # keeps the failure envelope defined in a single place.
        return CompileResult(
            success=False,
            error=str(exc),
            error_code="CHART_COMPILE_FAILED",
            tier="compile",
            error_obj=_build_compile_error(str(exc)),
        )
def _adhoc_filter_column_valid(
    column: str, clause: str, dataset_context: DatasetContext
) -> bool:
    """Return True if *column* is a valid reference for this filter clause.

    WHERE filters must reference a physical column; HAVING filters may also
    reference a saved metric because Superset resolves metric names there.
    """
    if clause == "HAVING":
        # Delegates to the validator, which also knows about saved metrics.
        return DatasetValidator._column_exists(column, dataset_context)
    needle = column.lower()
    for candidate in dataset_context.available_columns:
        if candidate["name"].lower() == needle:
            return True
    return False
def _validate_adhoc_filter_columns(
    form_data: Dict[str, Any], dataset_context: DatasetContext
) -> ChartGenerationError | None:
    """Tier-1 check for adhoc-filter column references stored in ``form_data``.

    ``DatasetValidator._extract_column_references`` walks the typed
    ``ChartConfig`` and only sees ``config.filters``. Tools like
    ``update_chart_preview`` and ``update_chart`` (preview path) also merge
    *previously cached* ``adhoc_filters`` into ``form_data`` that aren't
    represented on the new config — those would otherwise bypass validation
    and surface only when Explore tries to run the query.

    :returns: ``None`` when every referenced column resolves, otherwise a
        structured ``ChartGenerationError`` with fuzzy-match suggestions.
    """
    adhoc_filters = form_data.get("adhoc_filters") or []
    invalid: List[str] = []
    for f in adhoc_filters:
        if not isinstance(f, dict):
            continue
        # SIMPLE filters expose the column via "subject"; SQL-expression
        # filters carry a free-form ``sqlExpression`` we can't safely parse,
        # so skip those.
        if f.get("expressionType") and f.get("expressionType") != "SIMPLE":
            continue
        column = f.get("subject") or f.get("col")
        if not column or not isinstance(column, str):
            continue
        # ``or "WHERE"`` also guards an explicit ``"clause": None`` in cached
        # filters — ``dict.get``'s default only covers a *missing* key, and
        # ``None.upper()`` would raise.
        clause = (f.get("clause") or "WHERE").upper()
        if not _adhoc_filter_column_valid(column, clause, dataset_context):
            invalid.append(column)
    if not invalid:
        return None
    # Collect deduped fuzzy-match suggestions across all bad columns.
    suggestions: List[str] = []
    for column in invalid:
        for suggestion in DatasetValidator._get_column_suggestions(
            column, dataset_context
        ):
            name = (
                suggestion.name
                if isinstance(suggestion, ColumnSuggestion)
                else str(suggestion)
            )
            if name and name not in suggestions:
                suggestions.append(name)
    bad = ", ".join(sorted(set(invalid)))
    return ChartGenerationError(
        error_type="invalid_column",
        message=(f"Filter references column(s) not in dataset: {bad}"),
        details=(
            "Adhoc filter columns must exist on the dataset. "
            "If these filters were preserved from a previous chart preview, "
            "remove them or pass an explicit ``filters`` list on the new config."
        ),
        suggestions=suggestions,
        error_code="CHART_VALIDATION_FAILED",
    )
def _build_compile_error(message: str) -> ChartGenerationError:
    """Wrap a raw compile-failure string in the structured response envelope."""
    # Generic remediation hints shown to the LLM client alongside the failure.
    remediation = [
        "Check that all columns exist in the dataset",
        "Verify aggregate functions are compatible with column types",
        "Ensure filters reference valid columns",
        "Try simplifying the chart configuration",
    ]
    return ChartGenerationError(
        error_type="compile_error",
        message="Chart query failed to execute. The chart was not saved.",
        details=message or "",
        suggestions=remediation,
        error_code="CHART_COMPILE_FAILED",
    )
def validate_and_compile(
    config: Any,
    form_data: Dict[str, Any],
    dataset: Any,
    *,
    run_compile_check: bool = True,
) -> CompileResult:
    """Run schema validation (Tier 1) and optionally a compile check (Tier 2).

    ``dataset`` must be an already-fetched ORM dataset; this avoids a second
    ``DatasetDAO.find_by_id`` round trip inside the validator.

    ``run_compile_check`` lets fast-path tools (``generate_explore_link``,
    ``update_chart_preview``) skip the live DB query while still rejecting
    obviously bad column references with fuzzy-match suggestions.

    Returns a :class:`CompileResult`. On failure, ``error_obj`` carries the
    structured :class:`ChartGenerationError` (with ``suggestions``) that the
    caller should embed in its response envelope so LLM clients can
    self-correct.
    """
    if dataset is None:
        return CompileResult(
            success=False,
            error="Dataset not provided to validate_and_compile",
            error_code="DATASET_NOT_FOUND",
            tier="validation",
        )

    dataset_context = build_dataset_context_from_orm(dataset)

    # Tier 1: typed-config schema validation with fuzzy-match suggestions.
    is_valid, error = DatasetValidator.validate_against_dataset(
        config, dataset.id, dataset_context=dataset_context
    )
    if not is_valid:
        if error is None:
            details = ""
        else:
            details = error.details or error.message
            if error.error_code is None:
                error.error_code = "CHART_VALIDATION_FAILED"
        return CompileResult(
            success=False,
            error=details,
            error_code="CHART_VALIDATION_FAILED",
            tier="validation",
            error_obj=error,
        )

    # Adhoc-filter columns living only in form_data (e.g. filters preserved
    # from a previously cached preview) are invisible to the typed validator
    # above, so check them separately.
    if dataset_context is not None:
        filter_error = _validate_adhoc_filter_columns(form_data, dataset_context)
        if filter_error is not None:
            return CompileResult(
                success=False,
                error=filter_error.details or filter_error.message,
                error_code="CHART_VALIDATION_FAILED",
                tier="validation",
                error_obj=filter_error,
            )

    # Tier 2: live compile check — skipped by tight-SLA callers.
    if run_compile_check:
        return _compile_chart(form_data, dataset.id)
    return CompileResult(success=True)

View File

@@ -20,7 +20,8 @@ MCP tool: generate_chart (simplified schema)
import logging
import time
from typing import Any
from dataclasses import dataclass, field
from typing import Any, Dict, List
from fastmcp import Context
from sqlalchemy.exc import SQLAlchemyError
@@ -38,11 +39,6 @@ from superset.mcp_service.chart.chart_utils import (
map_config_to_form_data,
validate_chart_dataset,
)
from superset.mcp_service.chart.compile import (
_compile_chart,
CompileResult,
validate_and_compile,
)
from superset.mcp_service.chart.schemas import (
AccessibilityMetadata,
CHART_FORM_DATA_EXCLUDED_FIELD_NAMES,
@@ -78,7 +74,86 @@ def _sanitize_generate_chart_form_data_for_llm_context(
)
__all__ = ["CompileResult", "_compile_chart", "validate_and_compile", "generate_chart"]
@dataclass
class CompileResult:
    """Result of a chart compile check (test query execution)."""

    # True when the compile-check query executed without error.
    success: bool
    # Failure description from the query/command layer; None on success.
    error: str | None = None
    warnings: List[str] = field(default_factory=list)
    # Number of rows returned by the compile-check query, when it ran.
    row_count: int | None = None
def _compile_chart(
    form_data: Dict[str, Any],
    dataset_id: int,
) -> CompileResult:
    """Execute the chart's query to verify it renders without errors.

    Builds a ``QueryContext`` from *form_data* and runs it through
    ``ChartDataCommand``. A small ``row_limit`` is used so the check is
    fast — we only need to know the query compiles and returns data, not
    fetch the full result set.

    :param form_data: chart form data to compile.
    :param dataset_id: id of the dataset to query against.
    :returns: a :class:`CompileResult` with ``success=True`` when the
        query executes cleanly.
    """
    # Local imports keep module import time low and avoid circular imports.
    from superset.commands.chart.data.get_data_command import ChartDataCommand
    from superset.commands.chart.exceptions import (
        ChartDataCacheLoadError,
        ChartDataQueryFailedError,
    )
    from superset.common.query_context_factory import QueryContextFactory
    from superset.mcp_service.chart.chart_utils import adhoc_filters_to_query_filters
    from superset.mcp_service.chart.preview_utils import _build_query_columns

    try:
        columns = _build_query_columns(form_data)
        query_filters = adhoc_filters_to_query_filters(
            form_data.get("adhoc_filters", [])
        )
        # Big Number charts use singular "metric" instead of "metrics"
        metrics = form_data.get("metrics", [])
        if not metrics and form_data.get("metric"):
            metrics = [form_data["metric"]]
        # Big Number with trendline uses granularity_sqla as the time column
        if not columns and form_data.get("granularity_sqla"):
            columns = [form_data["granularity_sqla"]]
        factory = QueryContextFactory()
        query_context = factory.create(
            datasource={"id": dataset_id, "type": "table"},
            queries=[
                {
                    "columns": columns,
                    "metrics": metrics,
                    "orderby": form_data.get("orderby", []),
                    "row_limit": 2,
                    "filters": query_filters,
                    "time_range": form_data.get("time_range", "No filter"),
                }
            ],
            form_data=form_data,
        )
        command = ChartDataCommand(query_context)
        command.validate()
        result = command.run()
        warnings: List[str] = []
        row_count = 0
        for query in result.get("queries", []):
            # Per-query errors don't raise — they come back in the payload.
            if query.get("error"):
                return CompileResult(success=False, error=str(query["error"]))
            row_count += len(query.get("data", []))
        return CompileResult(success=True, warnings=warnings, row_count=row_count)
    except (
        ChartDataQueryFailedError,
        ChartDataCacheLoadError,
        CommandException,
        ValueError,
        KeyError,
    ) as exc:
        # The original two handlers had byte-identical bodies; one tuple-catch
        # keeps the failure path in a single place.
        return CompileResult(success=False, error=str(exc))
@tool(

View File

@@ -40,7 +40,6 @@ from superset.mcp_service.chart.chart_utils import (
generate_chart_name,
map_config_to_form_data,
)
from superset.mcp_service.chart.compile import validate_and_compile
from superset.mcp_service.chart.schemas import (
AccessibilityMetadata,
GenerateChartResponse,
@@ -163,70 +162,6 @@ def _build_preview_form_data(
return merged
def _validate_update_against_dataset(
    parsed_config: Any,
    form_data: dict[str, Any],
    chart: Any,
) -> GenerateChartResponse | None:
    """Run Tier 1 (schema) + Tier 2 (compile) validation against the chart's
    dataset. Returns ``None`` on success, or a :class:`GenerateChartResponse`
    error envelope on failure that callers should return as-is.
    """
    # Function-scope import — presumably to avoid an import cycle with the
    # DAO layer; TODO confirm.
    from superset.daos.dataset import DatasetDAO

    # Prefer the already-loaded relationship; fall back to a DAO lookup by id
    # when only datasource_id is set on the chart.
    dataset = getattr(chart, "datasource", None)
    if dataset is None and getattr(chart, "datasource_id", None) is not None:
        dataset = DatasetDAO.find_by_id(chart.datasource_id)
    if dataset is None:
        # No resolvable dataset: return the standard error envelope rather
        # than raising, so callers can pass it straight back to the client.
        return GenerateChartResponse.model_validate(
            {
                "chart": None,
                "error": {
                    "error_type": "DatasetNotAccessible",
                    "message": "Chart's dataset is not accessible",
                    "details": (
                        f"Dataset {getattr(chart, 'datasource_id', None)} "
                        "is missing or inaccessible."
                    ),
                },
                "success": False,
                "schema_version": "2.0",
                "api_version": "v1",
            }
        )
    # run_compile_check=True adds the Tier-2 compile/query check on top of
    # Tier-1 schema validation.
    compile_result = validate_and_compile(
        parsed_config, form_data, dataset, run_compile_check=True
    )
    if compile_result.success:
        return None
    logger.warning(
        "update_chart validation failed for chart %s: %s",
        getattr(chart, "id", None),
        compile_result.error,
    )
    # Prefer the structured error object when the compile step produced one;
    # otherwise assemble a generic validation_error payload from the fields
    # available on the CompileResult.
    if compile_result.error_obj is not None:
        error_payload = compile_result.error_obj.model_dump()
    else:
        error_payload = {
            "error_type": "validation_error",
            "message": "Chart update validation failed",
            "details": compile_result.error or "",
            "error_code": compile_result.error_code,
            "suggestions": [],
        }
    return GenerateChartResponse.model_validate(
        {
            "chart": None,
            "error": error_payload,
            "success": False,
            "schema_version": "2.0",
            "api_version": "v1",
        }
    )
def _create_preview_url(
chart: Any, form_data: dict[str, Any]
) -> tuple[str, str | None, list[str]]:
@@ -399,18 +334,6 @@ async def update_chart( # noqa: C901
if "params" in payload_or_error:
new_form_data = json.loads(payload_or_error["params"])
# Validate before persisting — catches bad column refs and runtime
# SQL errors so we don't commit a chart that can't be queried.
# Renames (no parsed_config) skip validation since form_data is
# untouched.
if parsed_config is not None and new_form_data is not None:
with event_logger.log_context(action="mcp.update_chart.validation"):
validation_error = _validate_update_against_dataset(
parsed_config, new_form_data, chart
)
if validation_error is not None:
return validation_error
with event_logger.log_context(action="mcp.update_chart.db_write"):
command = UpdateChartCommand(chart.id, payload_or_error)
updated_chart = command.run()
@@ -423,15 +346,6 @@ async def update_chart( # noqa: C901
if isinstance(preview_or_error, GenerateChartResponse):
return preview_or_error
# Validate before caching the form_data — same rationale as above.
if parsed_config is not None:
with event_logger.log_context(action="mcp.update_chart.validation"):
validation_error = _validate_update_against_dataset(
parsed_config, preview_or_error, chart
)
if validation_error is not None:
return validation_error
with event_logger.log_context(action="mcp.update_chart.preview_link"):
explore_url, form_data_key, warnings = _create_preview_url(
chart, preview_or_error

View File

@@ -30,7 +30,6 @@ from superset_core.mcp.decorators import tool, ToolAnnotations
from superset.commands.exceptions import CommandException
from superset.exceptions import OAuth2Error, OAuth2RedirectError, SupersetException
from superset.extensions import event_logger
from superset.mcp_service.auth import has_dataset_access
from superset.mcp_service.chart.chart_helpers import extract_form_data_key_from_url
from superset.mcp_service.chart.chart_utils import (
analyze_chart_capabilities,
@@ -39,7 +38,6 @@ from superset.mcp_service.chart.chart_utils import (
generate_explore_link,
map_config_to_form_data,
)
from superset.mcp_service.chart.compile import validate_and_compile
from superset.mcp_service.chart.schemas import (
AccessibilityMetadata,
PerformanceMetadata,
@@ -53,16 +51,9 @@ from superset.utils import json as utils_json
logger = logging.getLogger(__name__)
INVALID_FORM_DATA_KEY_WARNING = (
"Previous cached chart state could not be loaded from the previous "
"form_data_key. The preview was generated from the supplied "
"configuration only; the previous form_data_key may be invalid or "
"expired."
)
def _get_previous_form_data(form_data_key: str) -> dict[str, Any] | None:
"""Retrieve the previously cached form_data."""
def _get_old_adhoc_filters(form_data_key: str) -> list[Dict[str, Any]] | None:
"""Retrieve adhoc_filters from the previously cached form_data."""
from superset.commands.exceptions import CommandException
from superset.commands.explore.form_data.get import GetFormDataCommand
from superset.commands.explore.form_data.parameters import CommandParameters
@@ -74,9 +65,11 @@ def _get_previous_form_data(form_data_key: str) -> dict[str, Any] | None:
if isinstance(cached_data, str):
cached_data = utils_json.loads(cached_data)
if isinstance(cached_data, dict):
return cached_data
adhoc_filters = cached_data.get("adhoc_filters")
if adhoc_filters:
return adhoc_filters
except (KeyError, ValueError, TypeError, CommandException):
logger.debug("Could not retrieve previous form_data from cache")
logger.debug("Could not retrieve old form_data for filter preservation")
return None
@@ -90,7 +83,7 @@ def _get_previous_form_data(form_data_key: str) -> dict[str, Any] | None:
destructiveHint=True,
),
)
def update_chart_preview( # noqa: C901
def update_chart_preview(
request: UpdateChartPreviewRequest, ctx: Context
) -> Dict[str, Any]:
"""Update cached chart preview without saving.
@@ -120,76 +113,14 @@ def update_chart_preview( # noqa: C901
config, dataset_id=request.dataset_id
)
new_form_data.pop("_mcp_warnings", None)
warnings: list[str] = []
previous_form_data: dict[str, Any] | None = None
if request.form_data_key:
previous_form_data = _get_previous_form_data(request.form_data_key)
if previous_form_data is None:
warnings.append(INVALID_FORM_DATA_KEY_WARNING)
# Preserve adhoc filters from the previous cached form_data
# when the new config doesn't explicitly specify filters
if getattr(config, "filters", None) is None and previous_form_data:
old_adhoc_filters = previous_form_data.get("adhoc_filters")
if getattr(config, "filters", None) is None and request.form_data_key:
old_adhoc_filters = _get_old_adhoc_filters(request.form_data_key)
if old_adhoc_filters:
new_form_data["adhoc_filters"] = old_adhoc_filters
# Tier-1 schema validation against the dataset (no DB roundtrip).
# Runs AFTER the filter merge so filter columns are also validated.
from superset.daos.dataset import DatasetDAO
if isinstance(request.dataset_id, int) or (
isinstance(request.dataset_id, str) and request.dataset_id.isdigit()
):
dataset = DatasetDAO.find_by_id(int(request.dataset_id))
else:
dataset = DatasetDAO.find_by_id(request.dataset_id, id_column="uuid")
if dataset is None or not has_dataset_access(dataset):
return {
"chart": None,
"error": {
"error_type": "DatasetNotAccessible",
"message": (
f"Dataset not found: {request.dataset_id}. "
"Use list_datasets to find valid dataset IDs."
),
"details": (
f"Dataset {request.dataset_id} is missing or inaccessible."
),
},
"success": False,
"schema_version": "2.0",
"api_version": "v1",
}
compile_result = validate_and_compile(
config, new_form_data, dataset, run_compile_check=False
)
if not compile_result.success:
logger.warning(
"update_chart_preview validation failed: %s",
compile_result.error,
)
if compile_result.error_obj is not None:
error_payload = compile_result.error_obj.model_dump()
else:
error_payload = {
"error_type": "validation_error",
"message": "Chart preview validation failed",
"details": compile_result.error or "",
"error_code": compile_result.error_code,
"suggestions": [],
}
return {
"chart": None,
"error": error_payload,
"success": False,
"schema_version": "2.0",
"api_version": "v1",
}
# Generate new explore link with updated form_data
explore_url = generate_explore_link(request.dataset_id, new_form_data)
@@ -252,7 +183,6 @@ def update_chart_preview( # noqa: C901
"explore_url": explore_url,
"form_data_key": new_form_data_key,
"previous_form_data_key": request.form_data_key, # For reference
"warnings": warnings,
"api_endpoints": {}, # No API endpoints for unsaved charts
"performance": performance.model_dump() if performance else None,
"accessibility": accessibility.model_dump() if accessibility else None,

View File

@@ -25,12 +25,7 @@ import logging
from typing import Any, Dict, List, Tuple
from superset.mcp_service.chart.schemas import (
BigNumberChartConfig,
ColumnRef,
HandlebarsChartConfig,
MixedTimeseriesChartConfig,
PieChartConfig,
PivotTableChartConfig,
TableChartConfig,
XYChartConfig,
)
@@ -58,7 +53,7 @@ class DatasetValidator:
@staticmethod
def validate_against_dataset(
config: Any,
config: TableChartConfig | XYChartConfig,
dataset_id: int | str,
dataset_context: DatasetContext | None = None,
) -> Tuple[bool, ChartGenerationError | None]:
@@ -101,16 +96,13 @@ class DatasetValidator:
if column_error:
return False, column_error
# Validate aggregation compatibility for every config that produced
# column refs. ``_validate_aggregations`` is config-agnostic — gating
# it to Table/XY would let pie / pivot table / mixed timeseries /
# handlebars / big number slip through ``SUM(non_numeric)`` patterns
# for the fast-path tools that skip Tier 2.
aggregation_errors = DatasetValidator._validate_aggregations(
column_refs, dataset_context
)
if aggregation_errors:
return False, aggregation_errors[0]
# Validate aggregation compatibility
if isinstance(config, (TableChartConfig, XYChartConfig)):
aggregation_errors = DatasetValidator._validate_aggregations(
column_refs, dataset_context
)
if aggregation_errors:
return False, aggregation_errors[0]
return True, None
@@ -118,41 +110,14 @@ class DatasetValidator:
def _validate_columns_exist(
column_refs: List[ColumnRef], dataset_context: DatasetContext
) -> ChartGenerationError | None:
"""Validate that non-saved-metric column refs exist in the dataset.
A ``ColumnRef`` with ``saved_metric=False`` must match an entry in
``available_columns``. Saved-metric *names* don't satisfy this check —
otherwise ``{name: "sum_boys", aggregate: "SUM"}`` (no
``saved_metric=true``) would slip through and downstream code would
emit ``SUM(sum_boys)`` as an ad-hoc SIMPLE metric, producing the
broken-SQL pattern this validator is meant to prevent.
"""
column_names_lower = {
col["name"].lower() for col in dataset_context.available_columns
}
metric_names_lower = {
metric["name"].lower() for metric in dataset_context.available_metrics
}
invalid_columns: List[ColumnRef] = []
saved_metric_typo: List[ColumnRef] = []
"""Validate that non-saved-metric column refs exist in the dataset."""
invalid_columns = []
for col_ref in column_refs:
if col_ref.saved_metric:
continue
name_lower = col_ref.name.lower()
if name_lower in column_names_lower:
continue
if name_lower in metric_names_lower:
# Name matches a saved metric but the ref didn't opt into
# saved-metric resolution. Surface a tailored hint so the
# caller (typically an LLM) can flip ``saved_metric=true``.
saved_metric_typo.append(col_ref)
else:
if not DatasetValidator._column_exists(col_ref.name, dataset_context):
invalid_columns.append(col_ref)
if saved_metric_typo:
return DatasetValidator._build_saved_metric_hint_error(saved_metric_typo)
if not invalid_columns:
return None
@@ -167,36 +132,6 @@ class DatasetValidator:
invalid_columns, suggestions_map, dataset_context
)
@staticmethod
def _build_saved_metric_hint_error(
    refs: List[ColumnRef],
) -> ChartGenerationError:
    """Error response when a non-saved-metric ref names a saved metric."""
    metric_names = [ref.name for ref in refs]
    quoted = ", ".join(f"'{name}'" for name in metric_names)
    primary = metric_names[0]
    # Tell the caller (typically an LLM) exactly how to repair the ref:
    # flip saved_metric=true instead of applying an aggregate.
    message = (
        f"{quoted} matches a saved metric but the ref doesn't "
        f"have saved_metric=true"
    )
    details = (
        f"The dataset has a saved metric named {quoted}. To use "
        f"it, set 'saved_metric': true on the column ref instead of "
        f"providing an 'aggregate'. With the current shape, the "
        f"chart would emit ad-hoc SQL like SUM({primary}) — which is "
        f"invalid because {primary} is a metric expression, not a "
        f"column."
    )
    return ChartGenerationError(
        error_type="saved_metric_not_marked",
        message=message,
        details=details,
        suggestions=[
            f'Did you mean: {{"name": "{primary}", "saved_metric": true}}?',
            "Use saved_metric=true to reference a saved dataset metric",
            "Or pick a real column name and apply an aggregate to it",
        ],
        error_code="SAVED_METRIC_NOT_MARKED",
    )
@staticmethod
def _get_dataset_context(dataset_id: int | str) -> DatasetContext | None:
"""Get dataset context with column information."""
@@ -260,16 +195,11 @@ class DatasetValidator:
return None
@staticmethod
def _extract_column_references(config: Any) -> List[ColumnRef]: # noqa: C901
"""Extract all column references from a chart configuration.
Covers every supported ``ChartConfig`` variant so fast-path tools
(``generate_explore_link``, ``update_chart_preview``) that only run
Tier-1 validation still catch bad column refs in pie / pivot table /
mixed timeseries / handlebars / big number charts — not just XY and
table.
"""
refs: List[ColumnRef] = []
def _extract_column_references(
config: TableChartConfig | XYChartConfig,
) -> List[ColumnRef]:
"""Extract all column references from configuration."""
refs = []
if isinstance(config, TableChartConfig):
refs.extend(config.columns)
@@ -279,37 +209,10 @@ class DatasetValidator:
refs.extend(config.y)
if config.group_by:
refs.extend(config.group_by)
elif isinstance(config, PieChartConfig):
refs.append(config.dimension)
refs.append(config.metric)
elif isinstance(config, PivotTableChartConfig):
refs.extend(config.rows)
if config.columns:
refs.extend(config.columns)
refs.extend(config.metrics)
elif isinstance(config, MixedTimeseriesChartConfig):
refs.append(config.x)
refs.extend(config.y)
if config.group_by:
refs.extend(config.group_by)
refs.extend(config.y_secondary)
if config.group_by_secondary:
refs.extend(config.group_by_secondary)
elif isinstance(config, HandlebarsChartConfig):
if config.columns:
refs.extend(config.columns)
if config.groupby:
refs.extend(config.groupby)
if config.metrics:
refs.extend(config.metrics)
elif isinstance(config, BigNumberChartConfig):
refs.append(config.metric)
if config.temporal_column:
refs.append(ColumnRef(name=config.temporal_column))
# Filter columns (shared by every config type that defines ``filters``).
if filters := getattr(config, "filters", None):
for filter_config in filters:
# Add filter columns
if hasattr(config, "filters") and config.filters:
for filter_config in config.filters:
refs.append(ColumnRef(name=filter_config.column))
return refs
@@ -476,28 +379,20 @@ class DatasetValidator:
# Find close matches
column_lower = column_name.lower()
candidate_lookup = [name[0].lower() for name in all_names]
close_matches = difflib.get_close_matches(
column_lower,
candidate_lookup,
[name[0].lower() for name in all_names],
n=max_suggestions,
cutoff=0.6,
)
# Build suggestions with proper case and type info. ``ColumnSuggestion``
# requires ``similarity_score`` and does not have a ``data_type`` field;
# we score via difflib ratio and store the candidate kind in ``type``.
# Build suggestions with proper case and type info
suggestions = []
for match in close_matches:
for name, col_type, _data_type in all_names:
for name, col_type, data_type in all_names:
if name.lower() == match:
score = difflib.SequenceMatcher(None, column_lower, match).ratio()
suggestions.append(
ColumnSuggestion(
name=name,
type=col_type,
similarity_score=round(score, 3),
)
ColumnSuggestion(name=name, type=col_type, data_type=data_type)
)
break
@@ -608,12 +503,8 @@ class DatasetValidator:
break
if col_info:
# Check numeric aggregates on non-numeric columns.
# MIN and MAX are intentionally excluded: they work on dates
# and text in most SQL engines, so restricting them here would
# produce false-positive errors. Leave those to the Tier-2
# compile check.
numeric_aggs = ["SUM", "AVG", "STDDEV", "VAR", "MEDIAN"]
# Check numeric aggregates on non-numeric columns
numeric_aggs = ["SUM", "AVG", "MIN", "MAX", "STDDEV", "VAR", "MEDIAN"]
if (
col_ref.aggregate in numeric_aggs
and not col_info.get("is_numeric", False)

View File

@@ -22,26 +22,21 @@ This tool generates a URL to the Superset explore interface with the specified
chart configuration.
"""
import logging
from typing import Any, Dict
from fastmcp import Context
from superset_core.mcp.decorators import tool, ToolAnnotations
from superset.extensions import event_logger
from superset.mcp_service.auth import has_dataset_access
from superset.mcp_service.chart.chart_helpers import extract_form_data_key_from_url
from superset.mcp_service.chart.chart_utils import (
generate_explore_link as generate_url,
map_config_to_form_data,
)
from superset.mcp_service.chart.compile import validate_and_compile
from superset.mcp_service.chart.schemas import (
GenerateExploreLinkRequest,
)
logger = logging.getLogger(__name__)
@tool(
tags=["explore"],
@@ -136,24 +131,6 @@ async def generate_explore_link(
),
}
if not has_dataset_access(dataset):
logger.warning(
"User attempted to access dataset %s without permission",
request.dataset_id,
)
await ctx.warning(
"Dataset access denied: dataset_id=%s" % (request.dataset_id,)
)
return {
"url": "",
"form_data": {},
"form_data_key": None,
"error": (
f"Dataset not found: {request.dataset_id}. "
"Use list_datasets to find valid dataset IDs."
),
}
await ctx.report_progress(2, 4, "Converting configuration to form data")
with event_logger.log_context(action="mcp.generate_explore_link.form_data"):
# Normalize column names to match canonical dataset column names
@@ -188,38 +165,6 @@ async def generate_explore_link(
)
)
# Tier-1 schema validation against the dataset (no DB roundtrip).
# Catches references to non-existent columns/metrics with fuzzy
# suggestions so the LLM can self-correct ("did you mean sum_boys?").
with event_logger.log_context(action="mcp.generate_explore_link.validation"):
compile_result = validate_and_compile(
normalized_config,
form_data,
dataset,
run_compile_check=False,
)
if not compile_result.success:
await ctx.warning(
"Explore link validation failed: error=%s" % (compile_result.error,)
)
error_payload: Dict[str, Any]
if compile_result.error_obj is not None:
error_payload = compile_result.error_obj.model_dump()
else:
error_payload = {
"error_type": "validation_error",
"message": "Explore link validation failed",
"details": compile_result.error or "",
"error_code": compile_result.error_code,
"suggestions": [],
}
return {
"url": "",
"form_data": form_data,
"form_data_key": None,
"error": error_payload,
}
await ctx.report_progress(3, 4, "Generating explore URL")
with event_logger.log_context(
action="mcp.generate_explore_link.url_generation"

View File

@@ -1568,7 +1568,6 @@ def transpile_to_dialect(
sql: str,
target_engine: str,
source_engine: str | None = None,
identify: bool = False,
) -> str:
"""
Transpile SQL from one database dialect to another using SQLGlot.
@@ -1577,7 +1576,6 @@ def transpile_to_dialect(
sql: The SQL query to transpile
target_engine: The target database engine (e.g., "mysql", "postgresql")
source_engine: The source database engine. If None, uses generic SQL dialect.
identify: If True, quote all identifiers per the target dialect.
Returns:
The transpiled SQL string
@@ -1600,7 +1598,6 @@ def transpile_to_dialect(
copy=True,
comments=False,
pretty=False,
identify=identify,
)
except ParseError as ex:
raise QueryClauseValidationException(f"Cannot parse SQL clause: {sql}") from ex

View File

@@ -183,11 +183,6 @@ def load_explore_json_into_cache( # pylint: disable=too-many-locals
except Exception as ex:
if isinstance(ex, SupersetVizException):
errors = ex.errors
# Extract SIP-40 style errors when available
elif isinstance(ex, SupersetErrorException):
errors = [dataclasses.asdict(ex.error)] # type: ignore
elif isinstance(ex, SupersetErrorsException):
errors = [dataclasses.asdict(error) for error in ex.errors] # type: ignore
else:
error = ex.message if hasattr(ex, "message") else str(ex)
errors = [error] # type: ignore

View File

@@ -68,7 +68,6 @@ from superset.daos.datasource import DatasourceDAO
from superset.dashboards.permalink.exceptions import DashboardPermalinkGetFailedError
from superset.exceptions import (
CacheLoadError,
SupersetErrorException,
SupersetException,
SupersetSecurityException,
)
@@ -267,10 +266,6 @@ class Superset(BaseSupersetView):
)
return self.generate_json(viz_obj, response_type)
except SupersetErrorException:
# Let structured Superset errors (e.g. OAuth2RedirectError) propagate
# so the global Flask error handler serializes them.
raise
except SupersetException as ex:
return json_error_response(utils.error_msg_from_exception(ex), 400)
@@ -295,7 +290,7 @@ class Superset(BaseSupersetView):
@etag_cache()
@check_resource_permissions(check_datasource_perms)
@deprecated(eol_version="5.0.0")
def explore_json( # noqa: C901
def explore_json(
self, datasource_type: str | None = None, datasource_id: int | None = None
) -> FlaskResponse:
"""Serves all request that GET or POST form_data
@@ -382,10 +377,6 @@ class Superset(BaseSupersetView):
)
return self.generate_json(viz_obj, response_type)
except SupersetErrorException:
# Let structured Superset errors (e.g. OAuth2RedirectError) propagate
# so the global Flask error handler serializes them.
raise
except SupersetException as ex:
return json_error_response(utils.error_msg_from_exception(ex), 400)

View File

@@ -51,7 +51,6 @@ from superset.exceptions import (
NullValueException,
QueryObjectValidationError,
SpatialException,
SupersetErrorException,
SupersetSecurityException,
)
from superset.extensions import cache_manager, security_manager
@@ -613,10 +612,6 @@ class BaseViz: # pylint: disable=too-many-public-methods
)
self.errors.append(error)
self.status = QueryStatus.FAILED
except SupersetErrorException:
# Let structured Superset errors (e.g. OAuth2RedirectError) propagate
# so the global Flask error handler serializes them.
raise
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)

View File

@@ -41,7 +41,7 @@ from superset.common.db_query_status import QueryStatus
from superset.connectors.sqla.models import SqlaTable
from superset.db_engine_specs.base import BaseEngineSpec
from superset.db_engine_specs.mssql import MssqlEngineSpec
from superset.exceptions import OAuth2RedirectError, SupersetException
from superset.exceptions import SupersetException
from superset.extensions import cache_manager
from superset.models import core as models
from superset.models.dashboard import Dashboard
@@ -458,59 +458,6 @@ class TestCore(SupersetTestCase):
assert rv.status_code == 404
assert data["error"] == "Cached data not found"
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@mock.patch("superset.viz.BaseViz.get_df")
def test_explore_json_propagates_oauth2_redirect_error(
    self, mock_get_df: mock.Mock
) -> None:
    """
    SupersetErrorException exceptions bubble up properly.

    An ``OAuth2RedirectError`` raised from ``BaseViz.get_df`` must surface
    in the ``/explore_json/`` response as a structured ``OAUTH2_REDIRECT``
    error carrying its url/tab_id/redirect_uri extras, rather than being
    flattened into a generic error message.
    """
    mock_get_df.side_effect = OAuth2RedirectError(
        url="https://accounts.example.com/o/oauth2/v2/auth?...",
        tab_id="tab-123",
        redirect_uri="https://superset.example.com/oauth2/redirect",
    )
    self.login(ADMIN_USERNAME)
    slc = self.get_slice("Life Expectancy VS Rural %")
    rv = self.client.post(
        f"/superset/explore_json/{slc.datasource_type}/{slc.datasource_id}/",
        data={"form_data": json.dumps(slc.form_data)},
    )
    data = json.loads(rv.data.decode("utf-8"))
    # A structured "errors" payload (not a plain 400 message) shows the
    # exception reached the global error handler for serialization.
    assert "errors" in data, data
    assert data["errors"][0]["error_type"] == "OAUTH2_REDIRECT"
    assert data["errors"][0]["extra"] == {
        "url": "https://accounts.example.com/o/oauth2/v2/auth?...",
        "tab_id": "tab-123",
        "redirect_uri": "https://superset.example.com/oauth2/redirect",
    }
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@mock.patch("superset.viz.BaseViz.get_df")
def test_explore_json_generic_exception_still_returns_viz_get_df_error(
    self, mock_get_df: mock.Mock
) -> None:
    """
    Non-Superset exceptions raised by ``get_df`` are reported as the
    generic ``VIZ_GET_DF_ERROR``.

    Companion to the OAuth2 propagation test: only structured Superset
    errors are re-raised; arbitrary exceptions keep the existing generic
    error path.
    """
    mock_get_df.side_effect = RuntimeError("boom")
    self.login(ADMIN_USERNAME)
    slc = self.get_slice("Life Expectancy VS Rural %")
    rv = self.client.post(
        f"/superset/explore_json/{slc.datasource_type}/{slc.datasource_id}/",
        data={"form_data": json.dumps(slc.form_data)},
    )
    data = json.loads(rv.data.decode("utf-8"))
    # The RuntimeError's message is preserved in the generic error entry.
    assert "errors" in data, data
    assert data["errors"][0]["error_type"] == "VIZ_GET_DF_ERROR"
    assert data["errors"][0]["message"] == "boom"
def test_results_default_deserialization(self):
use_new_deserialization = False
data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]

View File

@@ -624,6 +624,65 @@ def test_get_tab_urls(
]
@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
@with_feature_flags(ALERT_REPORT_TABS=True)
def test_get_dashboard_urls_multitab_preserves_url_params(
    mock_permalink_cls,
    mocker: MockerFixture,
    app,
) -> None:
    """Multi-tab fan-out must preserve dashboard_state.urlParams (e.g. standalone)
    and replace any pre-existing native_filters entry with the report's value —
    matching the single-tab branch's merge semantics."""
    schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
    schedule.chart = False
    schedule.chart_id = None
    schedule.dashboard_id = 123
    schedule.type = "report_type"
    schedule.report_format = "report_format"
    schedule.owners = [1, 2]
    schedule.recipients = []

    report_rison = "(NATIVE_FILTER-1:(filterType:filter_select))"
    # urlParams must be list-of-lists (not tuples): extra_json round-trips
    # them through JSON arrays. The stale native_filters entry exercises the
    # dedup-then-append step of the merge.
    schedule.extra = {
        "dashboard": {
            "anchor": json.dumps(["TAB-1", "TAB-2"]),
            "urlParams": [
                ["standalone", "true"],
                ["native_filters", "(STALE_FILTER:(filterType:filter_select))"],
                ["show_filters", "0"],
            ],
        }
    }
    schedule.get_native_filters_params.return_value = (  # type: ignore[attr-defined]
        report_rison,
        [],
    )
    mock_permalink_cls.return_value.run.side_effect = ["key1", "key2"]

    report_state: BaseReportState = BaseReportState(
        schedule, "January 1, 2021", "execution_id_example"
    )
    report_state._report_schedule = schedule

    report_state.get_dashboard_urls()

    assert mock_permalink_cls.call_count == 2
    expected_params = [
        ["standalone", "true"],
        ["show_filters", "0"],
        ["native_filters", report_rison],
    ]
    for call, anchor in zip(mock_permalink_cls.call_args_list, ["TAB-1", "TAB-2"]):
        state = call.kwargs["state"]
        # Stale native_filters is replaced (not duplicated); other params keep
        # their original order; the report's native_filters is appended last.
        assert state["urlParams"] == expected_params
        # Each per-tab permalink targets exactly that tab.
        assert state["anchor"] == anchor
@patch(
"superset.commands.dashboard.permalink.create.CreateDashboardPermalinkCommand.run"
)
@@ -702,6 +761,58 @@ def test_get_dashboard_urls_native_filters_without_tabs(
assert "permalink_key" in result[0]
@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
@with_feature_flags(ALERT_REPORT_TABS=False)
def test_get_dashboard_urls_flag_off_preserves_url_params(
    mock_permalink_cls,
    mocker: MockerFixture,
    app,
) -> None:
    """The post-``if``-block fall-through in ``get_dashboard_urls`` must
    honor any urlParams set in ``extra.dashboard`` (e.g. via API) — same
    merge semantics as the protected branch.

    Reachability: only when ``dashboard_state`` is falsy OR
    ``ALERT_REPORT_TABS=False``. The flag-on / no-anchor case lands in
    the single-tab merge at L290-306, not here.
    """
    schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
    schedule.chart = False
    schedule.chart_id = None
    schedule.dashboard_id = 123

    report_rison = "(NATIVE_FILTER-abc:!(val1))"
    # Stale native_filters entry included to exercise the replace step.
    schedule.extra = {
        "dashboard": {
            "urlParams": [
                ["standalone", "true"],
                ["native_filters", "(STALE_FILTER:!(stale))"],
                ["show_filters", "0"],
            ],
        }
    }
    schedule.get_native_filters_params.return_value = (  # type: ignore[attr-defined]
        report_rison,
        [],
    )
    report_state: BaseReportState = BaseReportState(
        schedule, "January 1, 2021", "execution_id_example"
    )
    report_state._report_schedule = schedule
    mock_permalink_cls.return_value.run.return_value = "permalink_key"

    report_state.get_dashboard_urls()

    state = mock_permalink_cls.call_args_list[0].kwargs["state"]
    # Stale native_filters replaced; existing params survive in order;
    # the report's native_filters is appended last.
    assert state["urlParams"] == [
        ["standalone", "true"],
        ["show_filters", "0"],
        ["native_filters", report_rison],
    ]
def create_report_schedule(
mocker: MockerFixture,
custom_width: int | None = None,

View File

@@ -1,445 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Integration-style tests for ``validate_and_compile``.
These tests exercise the real ``DatasetValidator.validate_against_dataset``
path so fast-path tools (``generate_explore_link``, ``update_chart_preview``)
that only use Tier-1 validation are exercised end-to-end.
"""
from unittest.mock import Mock, patch
import pytest
from superset.mcp_service.chart.compile import (
build_dataset_context_from_orm,
CompileResult,
validate_and_compile,
)
from superset.mcp_service.chart.schemas import (
BigNumberChartConfig,
ColumnRef,
FilterConfig,
PieChartConfig,
PivotTableChartConfig,
TableChartConfig,
XYChartConfig,
)
def _orm_dataset(
*,
column_names: list[str] | None = None,
metric_names: list[str] | None = None,
has_database: bool = True,
) -> Mock:
"""Build a Mock dataset that satisfies build_dataset_context_from_orm."""
columns = []
for name in column_names or ["ds", "gender", "name", "num"]:
col = Mock()
col.column_name = name
col.type = "TEXT"
col.is_temporal = name == "ds"
col.is_numeric = name == "num"
columns.append(col)
metrics = []
for name in metric_names or ["sum_boys", "sum_girls"]:
m = Mock()
m.metric_name = name
m.expression = f"SUM({name})"
m.description = None
metrics.append(m)
dataset = Mock()
dataset.id = 3
dataset.table_name = "birth_names"
dataset.schema = None
dataset.columns = columns
dataset.metrics = metrics
if has_database:
db = Mock()
db.database_name = "examples"
dataset.database = db
else:
dataset.database = None
return dataset
class TestBuildDatasetContextFromOrm:
    """Cover the helper that converts ORM dataset → DatasetContext."""

    def test_handles_missing_database_relationship(self):
        """``database_name`` defaults to '' when ``dataset.database`` is None
        so Pydantic validation doesn't blow up."""
        dataset = _orm_dataset(has_database=False)
        context = build_dataset_context_from_orm(dataset)
        assert context is not None
        assert context.database_name == ""
        assert context.id == 3
        column_names = {col["name"] for col in context.available_columns}
        assert column_names == {"ds", "gender", "name", "num"}
        metric_names = {metric["name"] for metric in context.available_metrics}
        assert metric_names == {"sum_boys", "sum_girls"}

    def test_returns_none_for_none_input(self):
        assert build_dataset_context_from_orm(None) is None
class TestValidateAndCompileChartTypeCoverage:
    """Tier-1 validation must catch bad column refs in every supported
    chart-config variant — not just XY and table."""

    def test_xy_bad_metric_column_rejected(self):
        """XY y-metric on a non-existent column fails Tier-1 with a
        suggestion pointing at the similarly named saved metric."""
        ds = _orm_dataset()
        config = XYChartConfig(
            chart_type="xy",
            x=ColumnRef(name="ds"),
            # "num_boys" is neither a column nor a saved metric on the dataset.
            y=[ColumnRef(name="num_boys", aggregate="SUM")],
            kind="line",
        )
        # run_compile_check=False: exercise Tier-1 schema validation only.
        result = validate_and_compile(config, {}, ds, run_compile_check=False)
        assert not result.success
        assert result.tier == "validation"
        assert result.error_obj is not None
        assert any("sum_boys" in s for s in (result.error_obj.suggestions or []))

    def test_pie_bad_metric_column_rejected(self):
        """Pie metric on a non-existent column must be rejected too."""
        ds = _orm_dataset()
        config = PieChartConfig(
            dimension=ColumnRef(name="gender"),
            metric=ColumnRef(name="num_boys", aggregate="SUM"),
        )
        result = validate_and_compile(config, {}, ds, run_compile_check=False)
        assert not result.success, "Pie chart with bad metric column should fail"
        assert result.tier == "validation"
        assert result.error_obj is not None
        assert any("sum_boys" in s for s in (result.error_obj.suggestions or []))

    def test_pie_valid_dimension_and_saved_metric_passes(self):
        """A real dimension plus a properly flagged saved metric validates."""
        ds = _orm_dataset()
        config = PieChartConfig(
            dimension=ColumnRef(name="gender"),
            metric=ColumnRef(name="sum_boys", saved_metric=True),
        )
        result = validate_and_compile(config, {}, ds, run_compile_check=False)
        assert result.success, result.error

    def test_pivot_table_bad_row_rejected(self):
        """Pivot-table row refs are validated against dataset columns."""
        ds = _orm_dataset()
        config = PivotTableChartConfig(
            rows=[ColumnRef(name="bogus_dim")],
            metrics=[ColumnRef(name="sum_boys", saved_metric=True)],
        )
        result = validate_and_compile(config, {}, ds, run_compile_check=False)
        assert not result.success
        assert result.error_obj is not None

    def test_big_number_bad_temporal_column_rejected(self):
        """BigNumber's trendline temporal_column must exist on the dataset."""
        ds = _orm_dataset()
        config = BigNumberChartConfig(
            chart_type="big_number",
            metric=ColumnRef(name="sum_boys", saved_metric=True),
            # show_trendline=True is what makes temporal_column relevant here.
            temporal_column="not_a_real_temporal",
            show_trendline=True,
        )
        result = validate_and_compile(config, {}, ds, run_compile_check=False)
        assert not result.success, "BigNumber temporal_column must be validated"
        assert result.error_obj is not None
        assert "not_a_real_temporal" in (result.error_obj.message or "")

    def test_pie_with_sum_on_non_numeric_column_rejected(self):
        """Tier-1 aggregation compatibility now runs for non-Table/XY too —
        a pie ``metric={"name": "gender", "aggregate": "SUM"}`` would emit
        ``SUM(gender)`` which the DB rejects, so the validator must catch it
        before we hand back an explore URL."""
        ds = _orm_dataset()
        config = PieChartConfig(
            dimension=ColumnRef(name="name"),
            # "gender" is a TEXT, non-numeric column in _orm_dataset().
            metric=ColumnRef(name="gender", aggregate="SUM"),
        )
        result = validate_and_compile(config, {}, ds, run_compile_check=False)
        assert not result.success, "SUM on a TEXT column must reject"
        assert result.error_obj is not None
        assert result.error_obj.error_code == "INVALID_AGGREGATION"

    def test_pivot_table_sum_on_non_numeric_column_rejected(self):
        """Same aggregation compatibility check for pivot-table metrics."""
        ds = _orm_dataset()
        config = PivotTableChartConfig(
            rows=[ColumnRef(name="gender")],
            metrics=[ColumnRef(name="name", aggregate="SUM")],
        )
        result = validate_and_compile(config, {}, ds, run_compile_check=False)
        assert not result.success
        assert result.error_obj is not None
        assert result.error_obj.error_code == "INVALID_AGGREGATION"

    def test_pivot_table_min_on_non_numeric_column_passes(self):
        """MIN and MAX are not numeric-only (valid on dates/text in SQL).
        They are left to the Tier-2 compile check rather than being rejected
        by Tier-1 schema validation.
        """
        ds = _orm_dataset()
        config = PivotTableChartConfig(
            rows=[ColumnRef(name="gender")],
            metrics=[ColumnRef(name="name", aggregate="MIN")],
        )
        result = validate_and_compile(config, {}, ds, run_compile_check=False)
        assert result.success, (
            "MIN on a text column should not be rejected by Tier-1 validation"
        )

    def test_table_with_invalid_filter_column_rejected(self):
        """Typed FilterConfig columns are validated, not just chart columns."""
        ds = _orm_dataset()
        config = TableChartConfig(
            chart_type="table",
            columns=[ColumnRef(name="gender")],
            filters=[FilterConfig(column="bogus", op="=", value="x")],
        )
        result = validate_and_compile(config, {}, ds, run_compile_check=False)
        assert not result.success
        assert result.error_obj is not None
class TestSavedMetricNotMarked:
    """A non-saved-metric ColumnRef whose name matches a saved metric is a
    common LLM mistake (forgetting to set ``saved_metric=true``). The
    validator should surface a tailored hint instead of letting the bad SQL
    through."""

    def test_table_metric_name_without_saved_metric_flag_rejected(self):
        """Table column named after a saved metric but left unflagged."""
        dataset = _orm_dataset()
        table_config = TableChartConfig(
            chart_type="table",
            columns=[
                ColumnRef(name="gender"),
                # ``sum_boys`` is a saved metric on the dataset, but
                # saved_metric=False (default) would render as
                # ``SUM(sum_boys)`` ad-hoc SQL — broken.
                ColumnRef(name="sum_boys", aggregate="SUM"),
            ],
        )
        outcome = validate_and_compile(
            table_config, {}, dataset, run_compile_check=False
        )
        assert not outcome.success, (
            "ref.name matches a saved metric but saved_metric=False -> reject"
        )
        assert outcome.error_obj is not None
        assert outcome.error_obj.error_code == "SAVED_METRIC_NOT_MARKED"
        # Suggestion should point the LLM at the right correction.
        joined_suggestions = " ".join(outcome.error_obj.suggestions or [])
        assert "saved_metric" in joined_suggestions
        assert "sum_boys" in joined_suggestions

    def test_pie_metric_name_without_saved_metric_flag_rejected(self):
        """Same mistake on a pie metric gets the same tailored error code."""
        dataset = _orm_dataset()
        pie_config = PieChartConfig(
            dimension=ColumnRef(name="gender"),
            metric=ColumnRef(name="sum_boys", aggregate="SUM"),
        )
        outcome = validate_and_compile(
            pie_config, {}, dataset, run_compile_check=False
        )
        assert not outcome.success
        assert outcome.error_obj is not None
        assert outcome.error_obj.error_code == "SAVED_METRIC_NOT_MARKED"

    def test_explicit_saved_metric_passes(self):
        """Flagging the same name with saved_metric=True validates cleanly."""
        dataset = _orm_dataset()
        pie_config = PieChartConfig(
            dimension=ColumnRef(name="gender"),
            metric=ColumnRef(name="sum_boys", saved_metric=True),
        )
        outcome = validate_and_compile(
            pie_config, {}, dataset, run_compile_check=False
        )
        assert outcome.success, outcome.error
class TestAdhocFiltersFromFormData:
    """Filters merged into form_data (not present on the typed config) must
    also be validated. Without this hook, ``update_chart_preview`` could
    smuggle bad column refs through preserved adhoc filters."""

    def test_unknown_adhoc_filter_subject_rejected(self):
        """A SIMPLE filter whose subject isn't a dataset column must fail."""
        ds = _orm_dataset()
        config = TableChartConfig(
            chart_type="table", columns=[ColumnRef(name="gender")]
        )
        form_data = {
            "adhoc_filters": [
                {
                    "expressionType": "SIMPLE",
                    "subject": "removed_column",
                    "operator": "==",
                    "comparator": "x",
                }
            ]
        }
        result = validate_and_compile(config, form_data, ds, run_compile_check=False)
        assert not result.success
        assert result.error_obj is not None
        # The offending subject should be named in the error message.
        assert "removed_column" in (result.error_obj.message or "")

    def test_known_adhoc_filter_subject_passes(self):
        """A SIMPLE filter on a real column ("gender") validates cleanly."""
        ds = _orm_dataset()
        config = TableChartConfig(
            chart_type="table", columns=[ColumnRef(name="gender")]
        )
        form_data = {
            "adhoc_filters": [
                {
                    "expressionType": "SIMPLE",
                    "subject": "gender",
                    "operator": "==",
                    "comparator": "boy",
                }
            ]
        }
        result = validate_and_compile(config, form_data, ds, run_compile_check=False)
        assert result.success, result.error

    def test_sql_expression_filter_skipped(self):
        """SQL-expression filters carry a free-form ``sqlExpression`` we can't
        safely parse, so they should pass Tier-1 untouched."""
        ds = _orm_dataset()
        config = TableChartConfig(
            chart_type="table", columns=[ColumnRef(name="gender")]
        )
        form_data = {
            "adhoc_filters": [
                {
                    "expressionType": "SQL",
                    "clause": "WHERE",
                    "sqlExpression": "1 = 1",
                }
            ]
        }
        result = validate_and_compile(config, form_data, ds, run_compile_check=False)
        assert result.success

    def test_where_filter_with_metric_name_rejected(self):
        """A saved-metric name used as a WHERE filter subject must be rejected.
        WHERE filters need a physical column; metric names are only valid in
        HAVING clauses where Superset can resolve them.
        """
        ds = _orm_dataset()
        config = TableChartConfig(
            chart_type="table", columns=[ColumnRef(name="gender")]
        )
        form_data = {
            "adhoc_filters": [
                {
                    "expressionType": "SIMPLE",
                    "clause": "WHERE",
                    "subject": "sum_boys",  # saved metric, not a physical column
                    "operator": ">",
                    "comparator": "0",
                }
            ]
        }
        result = validate_and_compile(config, form_data, ds, run_compile_check=False)
        assert not result.success, (
            "A saved-metric name used in a WHERE filter must not pass Tier-1"
        )
        assert result.error_obj is not None
        assert "sum_boys" in (result.error_obj.message or "")

    def test_having_filter_with_metric_name_passes(self):
        """A saved-metric name used in a HAVING filter must be accepted.
        HAVING filters are aggregate-level conditions; Superset resolves metric
        names there so they are valid references.
        """
        ds = _orm_dataset()
        config = TableChartConfig(
            chart_type="table", columns=[ColumnRef(name="gender")]
        )
        form_data = {
            "adhoc_filters": [
                {
                    "expressionType": "SIMPLE",
                    "clause": "HAVING",
                    "subject": "sum_boys",  # saved metric — valid in HAVING
                    "operator": ">",
                    "comparator": "0",
                }
            ]
        }
        result = validate_and_compile(config, form_data, ds, run_compile_check=False)
        assert result.success, (
            "A saved-metric name in a HAVING filter should pass Tier-1 validation"
        )
class TestValidateAndCompileTier2:
    """When ``run_compile_check=True`` and Tier-1 passes, the helper must
    invoke ``_compile_chart`` and surface its outcome."""

    @patch("superset.mcp_service.chart.compile._compile_chart")
    def test_tier2_runs_when_tier1_passes(self, compile_mock):
        """A clean Tier-1 hands off to the (mocked) Tier-2 compile step."""
        compile_mock.return_value = CompileResult(success=True)
        dataset = _orm_dataset()
        table_config = TableChartConfig(
            chart_type="table", columns=[ColumnRef(name="gender")]
        )
        outcome = validate_and_compile(
            table_config, {"adhoc_filters": []}, dataset, run_compile_check=True
        )
        assert outcome.success
        compile_mock.assert_called_once()

    @patch("superset.mcp_service.chart.compile._compile_chart")
    def test_tier2_skipped_on_tier1_failure(self, compile_mock):
        """A Tier-1 rejection short-circuits before Tier-2 ever runs."""
        dataset = _orm_dataset()
        bad_config = TableChartConfig(
            chart_type="table", columns=[ColumnRef(name="bogus")]
        )
        outcome = validate_and_compile(bad_config, {}, dataset, run_compile_check=True)
        assert not outcome.success
        assert outcome.tier == "validation"
        compile_mock.assert_not_called()

    def test_dataset_none_returns_dataset_not_found(self):
        """A missing dataset maps to the DATASET_NOT_FOUND error code."""
        outcome = validate_and_compile(None, {}, None, run_compile_check=True)
        assert not outcome.success
        assert outcome.error_code == "DATASET_NOT_FOUND"
@pytest.mark.parametrize(
    "config_factory",
    [
        # Pie: plain dimension plus a properly flagged saved metric.
        lambda: PieChartConfig(
            dimension=ColumnRef(name="gender"),
            metric=ColumnRef(name="sum_boys", saved_metric=True),
        ),
        # Table: mixes a raw column with a saved-metric column ref.
        lambda: TableChartConfig(
            chart_type="table",
            columns=[
                ColumnRef(name="gender"),
                ColumnRef(name="sum_boys", saved_metric=True),
            ],
        ),
    ],
)
def test_valid_configs_pass_tier1(config_factory):
    """Sanity check: well-formed configs sail through Tier-1 validation."""
    ds = _orm_dataset()
    result = validate_and_compile(config_factory(), {}, ds, run_compile_check=False)
    assert result.success, result.error

View File

@@ -745,9 +745,6 @@ class TestUpdateChartNameOnly:
class TestUpdateChartPreviewFirst:
"""Integration-style tests for the preview-first default flow."""
@patch.object(
update_chart_module, "_validate_update_against_dataset", return_value=None
)
@patch.object(update_chart_module, "_create_preview_url", new_callable=Mock)
@patch(
"superset.commands.chart.update.UpdateChartCommand",
@@ -767,7 +764,6 @@ class TestUpdateChartPreviewFirst:
mock_check_access,
mock_update_cmd_cls,
mock_create_preview,
unused_validate_mock,
mcp_server,
):
"""Default update flow returns a preview URL and does NOT save."""
@@ -938,9 +934,6 @@ class TestBuildPreviewFormData:
class TestUpdateChartSaveWithConfig:
"""Save-path integration tests for update_chart with a full config payload."""
@patch.object(
update_chart_module, "_validate_update_against_dataset", return_value=None
)
@patch(
"superset.commands.chart.update.UpdateChartCommand",
new_callable=Mock,
@@ -958,7 +951,6 @@ class TestUpdateChartSaveWithConfig:
mock_find_by_id,
mock_check_access,
mock_update_cmd_cls,
unused_validate_mock,
mcp_server,
):
"""generate_preview=False with config persists and returns saved chart."""
@@ -1094,9 +1086,6 @@ class TestUpdateChartErrorPaths:
assert error["error_type"] == "CommandException"
assert "boom" in error["details"]
@patch.object(
update_chart_module, "_validate_update_against_dataset", return_value=None
)
@patch.object(update_chart_module, "_create_preview_url", new_callable=Mock)
@patch(
"superset.mcp_service.auth.check_chart_data_access",
@@ -1111,7 +1100,6 @@ class TestUpdateChartErrorPaths:
mock_find_by_id,
mock_check_access,
mock_create_preview,
unused_validate_mock,
mcp_server,
):
"""If _create_preview_url returns (url, None), form_data_key comes from url."""
@@ -1149,140 +1137,3 @@ class TestUpdateChartErrorPaths:
assert result.structured_content["success"] is True
assert result.structured_content["form_data_key"] == "url_embedded_key"
class TestUpdateChartValidationGate:
    """Tier-1+2 validation prevents bad config from reaching DB or cache."""

    @staticmethod
    def _mock_chart_with_dataset(chart_id: int = 1) -> Mock:
        """Minimal chart Mock carrying the attributes update_chart reads."""
        chart = Mock()
        chart.id = chart_id
        chart.datasource_id = 10
        chart.slice_name = "Existing"
        chart.viz_type = "table"
        chart.uuid = "abc-123"
        chart.params = '{"viz_type": "table", "datasource": "10__table"}'
        # validate_and_compile is mocked, so dataset shape doesn't matter.
        chart.datasource = Mock()
        return chart

    @patch.object(update_chart_module, "validate_and_compile")
    @patch.object(update_chart_module, "_create_preview_url", new_callable=Mock)
    @patch(
        "superset.mcp_service.auth.check_chart_data_access",
        new_callable=Mock,
    )
    @patch("superset.daos.chart.ChartDAO.find_by_id", new_callable=Mock)
    @patch("superset.db.session")
    @pytest.mark.asyncio
    async def test_preview_path_validation_failure_skips_cache(
        self,
        mock_db_session,
        mock_find_by_id,
        mock_check_access,
        mock_create_preview,
        mock_validate,
        mcp_server,
    ):
        """Preview path: bad column → structured error, _create_preview_url
        must NOT be called."""
        from superset.mcp_service.chart.compile import CompileResult
        from superset.mcp_service.common.error_schemas import ChartGenerationError

        mock_find_by_id.return_value = self._mock_chart_with_dataset()
        mock_check_access.return_value = DatasetValidationResult(
            is_valid=True, dataset_id=10, dataset_name="ds", warnings=[]
        )
        # Force a Tier-1 failure from the (mocked) validator.
        mock_validate.return_value = CompileResult(
            success=False,
            error="Column 'num_boys' does not exist",
            error_code="CHART_VALIDATION_FAILED",
            tier="validation",
            error_obj=ChartGenerationError(
                error_type="invalid_column",
                message="Column 'num_boys' does not exist",
                details="Available: ds, gender, sum_boys",
                suggestions=["sum_boys"],
                error_code="CHART_VALIDATION_FAILED",
            ),
        )
        request = {
            "identifier": 1,
            "config": {
                "chart_type": "xy",
                "x": {"name": "ds"},
                "y": [{"name": "num_boys", "aggregate": "SUM"}],
                "kind": "line",
            },
        }
        async with Client(mcp) as client:
            result = await client.call_tool("update_chart", {"request": request})
        assert result.structured_content["success"] is False
        error = result.structured_content["error"]
        assert error["error_code"] == "CHART_VALIDATION_FAILED"
        assert "sum_boys" in error["suggestions"]
        # The whole point: no preview/cache write after a validation failure.
        mock_create_preview.assert_not_called()

    @patch.object(update_chart_module, "validate_and_compile")
    @patch(
        "superset.commands.chart.update.UpdateChartCommand",
        new_callable=Mock,
    )
    @patch(
        "superset.mcp_service.auth.check_chart_data_access",
        new_callable=Mock,
    )
    @patch("superset.daos.chart.ChartDAO.find_by_id", new_callable=Mock)
    @patch("superset.db.session")
    @pytest.mark.asyncio
    async def test_persist_path_validation_failure_skips_db_write(
        self,
        mock_db_session,
        mock_find_by_id,
        mock_check_access,
        mock_update_cmd_cls,
        mock_validate,
        mcp_server,
    ):
        """Persist path: validation failure → UpdateChartCommand NOT called."""
        from superset.mcp_service.chart.compile import CompileResult
        from superset.mcp_service.common.error_schemas import ChartGenerationError

        mock_find_by_id.return_value = self._mock_chart_with_dataset(chart_id=42)
        mock_check_access.return_value = DatasetValidationResult(
            is_valid=True, dataset_id=10, dataset_name="ds", warnings=[]
        )
        mock_validate.return_value = CompileResult(
            success=False,
            error="Column 'bad_col' does not exist",
            error_code="CHART_VALIDATION_FAILED",
            tier="validation",
            error_obj=ChartGenerationError(
                error_type="invalid_column",
                message="Column 'bad_col' does not exist",
                details="Available: a, b, c",
                suggestions=["a"],
                error_code="CHART_VALIDATION_FAILED",
            ),
        )
        # generate_preview=False selects the persist (save) path.
        request = {
            "identifier": 42,
            "generate_preview": False,
            "config": {
                "chart_type": "table",
                "columns": [{"name": "bad_col"}],
            },
        }
        async with Client(mcp) as client:
            result = await client.call_tool("update_chart", {"request": request})
        assert result.structured_content["success"] is False
        error = result.structured_content["error"]
        assert error["error_code"] == "CHART_VALIDATION_FAILED"
        # No DB write: the update command class must never be instantiated.
        mock_update_cmd_cls.assert_not_called()

View File

@@ -19,13 +19,8 @@
Unit tests for update_chart_preview MCP tool
"""
import importlib
from unittest.mock import Mock, patch
import pytest
from fastmcp import Client
from superset.mcp_service.app import mcp
from superset.mcp_service.chart.schemas import (
AxisConfig,
ColumnRef,
@@ -36,47 +31,6 @@ from superset.mcp_service.chart.schemas import (
XYChartConfig,
)
# The package ``__init__.py`` re-exports the ``update_chart_preview`` tool
# function under the same dotted path as the module, so mock.patch's string
# lookup of ``...update_chart_preview.<attr>`` can resolve to the function on
# some Python versions. Hold a direct module reference for ``patch.object``.
update_chart_preview_module = importlib.import_module(
"superset.mcp_service.chart.tool.update_chart_preview"
)
@pytest.fixture
def mcp_server():
    """Expose the module-level ``mcp`` app to tests that open a Client."""
    return mcp
@pytest.fixture
def mock_auth():
    """Mock authentication for tool-invocation tests."""
    with patch("superset.mcp_service.auth.get_user_from_request") as mock_get_user:
        # Stand-in principal: only id and username are populated here.
        user = Mock()
        user.id = 1
        user.username = "admin"
        mock_get_user.return_value = user
        yield mock_get_user
def _mock_dataset(id: int = 1) -> Mock:
"""Mock SqlaTable with the attributes the tool reads."""
column = Mock()
column.column_name = "ds"
column.type = "TIMESTAMP"
database = Mock()
database.database_name = "main"
dataset = Mock()
dataset.id = id
dataset.table_name = "birth_names"
dataset.schema = None
dataset.columns = [column]
dataset.metrics = []
dataset.database = database
return dataset
class TestUpdateChartPreview:
"""Tests for update_chart_preview MCP tool."""
@@ -264,7 +218,6 @@ class TestUpdateChartPreview:
"explore_url",
"form_data_key",
"previous_form_data_key",
"warnings",
"api_endpoints",
"performance",
"accessibility",
@@ -519,279 +472,3 @@ class TestUpdateChartPreview:
)
assert request.config.sort_by == ["sales", "profit"]
assert len(request.config.columns) == 3
    @patch("superset.commands.explore.form_data.get.GetFormDataCommand")
    def test_get_previous_form_data_parses_json_cache_hit(
        self,
        mock_get_form_data_command,
    ) -> None:
        """Previous form_data lookup parses JSON strings from the cache."""
        cached_adhoc_filters = [
            {
                "clause": "WHERE",
                "comparator": "North",
                "expressionType": "SIMPLE",
                "operator": "==",
                "subject": "region",
            }
        ]
        # The cache hands back form_data as a JSON string; the helper must
        # decode it into a dict before use.
        mock_get_form_data_command.return_value.run.return_value = (
            '{"adhoc_filters": ['
            '{"clause": "WHERE", "comparator": "North", '
            '"expressionType": "SIMPLE", "operator": "==", "subject": "region"}'
            '], "viz_type": "table"}'
        )
        result = update_chart_preview_module._get_previous_form_data("valid_key_12345")
        assert result == {
            "adhoc_filters": cached_adhoc_filters,
            "viz_type": "table",
        }
        # The command must have been constructed with the requested cache key.
        command_params = mock_get_form_data_command.call_args.args[0]
        assert command_params.key == "valid_key_12345"
    @patch("superset.commands.explore.form_data.get.GetFormDataCommand")
    def test_get_previous_form_data_returns_none_for_cache_failure(
        self,
        mock_get_form_data_command,
    ) -> None:
        """Previous form_data lookup treats command failures as cache misses."""
        # Simulate the form-data command blowing up on read.
        mock_get_form_data_command.return_value.run.side_effect = (
            update_chart_preview_module.CommandException("cache read failed")
        )
        result = update_chart_preview_module._get_previous_form_data(
            "missing_key_12345"
        )
        # Failure is swallowed and surfaced as a miss (None), not raised.
        assert result is None
    @patch.object(update_chart_preview_module, "validate_and_compile")
    @patch.object(update_chart_preview_module, "has_dataset_access", return_value=True)
    @patch("superset.daos.dataset.DatasetDAO.find_by_id")
    @patch.object(update_chart_preview_module, "analyze_chart_semantics")
    @patch.object(update_chart_preview_module, "analyze_chart_capabilities")
    @patch.object(update_chart_preview_module, "generate_explore_link")
    @patch.object(update_chart_preview_module, "_get_previous_form_data")
    @patch("superset.mcp_service.auth.get_user_from_request")
    @pytest.mark.asyncio
    async def test_warns_when_previous_form_data_key_is_missing(
        self,
        mock_get_user_from_request,
        mock_get_previous_form_data,
        mock_generate_explore_link,
        mock_analyze_chart_capabilities,
        mock_analyze_chart_semantics,
        mock_find_by_id,
        unused_access_mock,
        mock_validate_and_compile,
    ) -> None:
        """Invalid previous form_data_key is warning-only for preview updates."""
        mock_user = Mock()
        mock_user.id = 1
        mock_get_user_from_request.return_value = mock_user
        mock_find_by_id.return_value = _mock_dataset(id=3)
        mock_validate_and_compile.return_value = Mock(success=True)
        # Cache miss: the previous key resolves to nothing.
        mock_get_previous_form_data.return_value = None
        mock_generate_explore_link.return_value = (
            "http://localhost:8088/explore/?form_data_key=new_preview_key"
        )
        mock_analyze_chart_capabilities.return_value = None
        mock_analyze_chart_semantics.return_value = None
        request = UpdateChartPreviewRequest(
            form_data_key="nonexistent_key_12345",
            dataset_id=3,
            config=TableChartConfig(
                chart_type="table",
                columns=[
                    ColumnRef(name="country", label="Country"),
                    ColumnRef(name="sales", label="Sales", aggregate="SUM"),
                ],
                sort_by=["sales"],
            ),
            generate_preview=True,
            preview_formats=["table"],
        )
        result = update_chart_preview_module.update_chart_preview(
            request=request, ctx=Mock()
        )
        # Missing key does not fail the update — it only adds a warning.
        assert result["success"] is True
        assert result["error"] is None
        assert result["previous_form_data_key"] == "nonexistent_key_12345"
        assert result["form_data_key"] == "new_preview_key"
        assert result["warnings"] == [
            update_chart_preview_module.INVALID_FORM_DATA_KEY_WARNING
        ]
        mock_get_previous_form_data.assert_called_once_with("nonexistent_key_12345")
    @patch.object(update_chart_preview_module, "validate_and_compile")
    @patch.object(update_chart_preview_module, "has_dataset_access", return_value=True)
    @patch("superset.daos.dataset.DatasetDAO.find_by_id")
    @patch.object(update_chart_preview_module, "analyze_chart_semantics")
    @patch.object(update_chart_preview_module, "analyze_chart_capabilities")
    @patch.object(update_chart_preview_module, "generate_explore_link")
    @patch.object(update_chart_preview_module, "_get_previous_form_data")
    @patch("superset.mcp_service.auth.get_user_from_request")
    @pytest.mark.asyncio
    async def test_preserves_previous_adhoc_filters_without_warning(
        self,
        mock_get_user_from_request,
        mock_get_previous_form_data,
        mock_generate_explore_link,
        mock_analyze_chart_capabilities,
        mock_analyze_chart_semantics,
        mock_find_by_id,
        unused_access_mock,
        mock_validate_and_compile,
    ) -> None:
        """Valid previous form_data preserves filters without a cache warning."""
        mock_user = Mock()
        mock_user.id = 1
        mock_get_user_from_request.return_value = mock_user
        mock_find_by_id.return_value = _mock_dataset(id=3)
        mock_validate_and_compile.return_value = Mock(success=True)
        cached_adhoc_filters = [
            {
                "clause": "WHERE",
                "comparator": "North",
                "expressionType": "SIMPLE",
                "operator": "==",
                "subject": "region",
            }
        ]
        # Cache hit: the previous key resolves to stored adhoc filters.
        mock_get_previous_form_data.return_value = {
            "adhoc_filters": cached_adhoc_filters
        }
        mock_generate_explore_link.return_value = (
            "http://localhost:8088/explore/?form_data_key=new_preview_key"
        )
        mock_analyze_chart_capabilities.return_value = None
        mock_analyze_chart_semantics.return_value = None
        request = UpdateChartPreviewRequest(
            form_data_key="valid_key_12345",
            dataset_id=3,
            config=TableChartConfig(
                chart_type="table",
                columns=[
                    ColumnRef(name="country", label="Country"),
                    ColumnRef(name="sales", label="Sales", aggregate="SUM"),
                ],
                sort_by=["sales"],
            ),
            generate_preview=True,
            preview_formats=["table"],
        )
        result = update_chart_preview_module.update_chart_preview(
            request=request, ctx=Mock()
        )
        # The cached filters must be carried into the generated form_data.
        generated_form_data = mock_generate_explore_link.call_args.args[1]
        assert generated_form_data["adhoc_filters"] == cached_adhoc_filters
        assert result["success"] is True
        assert result["error"] is None
        assert result["warnings"] == []
        mock_get_previous_form_data.assert_called_once_with("valid_key_12345")
class TestUpdateChartPreviewValidation:
    """Tier-1 validation gate and dataset access checks."""

    @patch.object(update_chart_preview_module, "has_dataset_access", return_value=True)
    @patch("superset.daos.dataset.DatasetDAO.find_by_id")
    @patch.object(update_chart_preview_module, "validate_and_compile")
    @patch(
        "superset.mcp_service.commands.create_form_data.MCPCreateFormDataCommand.run"
    )
    @pytest.mark.asyncio
    async def test_validation_failure_skips_cache_write(
        self,
        mock_create_form_data,
        mock_validate,
        mock_find_dataset,
        unused_access_mock,
        mcp_server,
        mock_auth,
    ):
        """Bad column ref → structured error with suggestions, no cache write."""
        from superset.mcp_service.chart.compile import CompileResult
        from superset.mcp_service.common.error_schemas import ChartGenerationError

        mock_find_dataset.return_value = _mock_dataset(id=3)
        # Force a Tier-1 validation failure from the (mocked) validator.
        mock_validate.return_value = CompileResult(
            success=False,
            error="Column 'num_boys' does not exist in dataset",
            error_code="CHART_VALIDATION_FAILED",
            tier="validation",
            error_obj=ChartGenerationError(
                error_type="invalid_column",
                message="Column 'num_boys' does not exist in dataset",
                details="Available columns: ds, gender, name, num, sum_boys",
                suggestions=["sum_boys"],
                error_code="CHART_VALIDATION_FAILED",
            ),
        )
        config = XYChartConfig(
            chart_type="xy",
            x=ColumnRef(name="ds"),
            y=[ColumnRef(name="num_boys", aggregate="SUM")],
            kind="line",
        )
        request = UpdateChartPreviewRequest(
            form_data_key="prev_key", dataset_id="3", config=config
        )
        async with Client(mcp_server) as client:
            result = await client.call_tool(
                "update_chart_preview", {"request": request.model_dump()}
            )
        assert result.data["success"] is False
        assert result.data["chart"] is None
        error = result.data["error"]
        assert isinstance(error, dict)
        assert error["error_code"] == "CHART_VALIDATION_FAILED"
        assert "sum_boys" in error["suggestions"]
        # Validation failure must block the form-data cache write entirely.
        mock_create_form_data.assert_not_called()

    @patch.object(update_chart_preview_module, "has_dataset_access", return_value=False)
    @patch("superset.daos.dataset.DatasetDAO.find_by_id")
    @patch(
        "superset.mcp_service.commands.create_form_data.MCPCreateFormDataCommand.run"
    )
    @pytest.mark.asyncio
    async def test_dataset_access_denied_short_circuits(
        self,
        mock_create_form_data,
        mock_find_dataset,
        unused_access_mock,
        mcp_server,
        mock_auth,
    ):
        """has_dataset_access=False → DatasetNotAccessible, no cache write."""
        mock_find_dataset.return_value = _mock_dataset(id=3)
        config = TableChartConfig(
            chart_type="table", columns=[ColumnRef(name="region")]
        )
        request = UpdateChartPreviewRequest(
            form_data_key="prev_key", dataset_id="3", config=config
        )
        async with Client(mcp_server) as client:
            result = await client.call_tool(
                "update_chart_preview", {"request": request.model_dump()}
            )
        assert result.data["success"] is False
        assert result.data["chart"] is None
        error = result.data["error"]
        assert isinstance(error, dict)
        assert error["error_type"] == "DatasetNotAccessible"
        # Access denial short-circuits before any cache interaction.
        mock_create_form_data.assert_not_called()

View File

@@ -19,7 +19,6 @@
Comprehensive unit tests for MCP generate_explore_link tool
"""
import importlib
import logging
from unittest.mock import Mock, patch
@@ -38,14 +37,6 @@ from superset.mcp_service.chart.schemas import (
)
from superset.mcp_service.common.error_schemas import DatasetContext
# The package ``__init__.py`` re-exports the ``generate_explore_link`` tool
# function under the same dotted path as the module, so mock.patch's string
# lookup of ``...generate_explore_link.<attr>`` can resolve to the function
# on some Python versions. Hold a direct module reference for ``patch.object``.
generate_explore_link_module = importlib.import_module(
"superset.mcp_service.explore.tool.generate_explore_link"
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
@@ -66,29 +57,6 @@ def mock_auth():
yield mock_get_user
@pytest.fixture(autouse=True)
def mock_dataset_access_granted():
    """Grant dataset access by default; tests that need a denial override this."""
    # autouse: every test in the module runs with access granted unless it
    # patches has_dataset_access itself.
    with patch.object(
        generate_explore_link_module, "has_dataset_access", return_value=True
    ):
        yield
@pytest.fixture(autouse=True)
def mock_validation_passes():
    """Skip Tier-1 dataset validation by default so Mock datasets don't trip the
    real validator. Individual tests that exercise validation override this."""
    # CompileResult is imported lazily, matching how the test methods in this
    # file import it inside their bodies.
    from superset.mcp_service.chart.compile import CompileResult

    with patch.object(
        generate_explore_link_module,
        "validate_and_compile",
        return_value=CompileResult(success=True),
    ):
        yield
@pytest.fixture(autouse=True)
def mock_webdriver_baseurl(app_context):
"""Mock WEBDRIVER_BASEURL_USER_FRIENDLY for consistent test URLs."""
@@ -954,102 +922,3 @@ class TestGenerateExploreLinkColumnNormalization:
assert result.data["error"] is None
# original names should pass through unchanged
assert result.data["form_data"]["x_axis"] == "orderdate"
class TestGenerateExploreLinkValidation:
    """Tier-1 validation gate (DatasetValidator) and dataset access checks."""

    @pytest.fixture(autouse=True)
    def mock_validation_passes(self):
        """Override the module-level autouse patch so each test in this class
        can stub ``validate_and_compile`` itself. The fixture name MUST match
        the module-level fixture for pytest's override-by-name to take effect.
        """
        return

    @patch.object(generate_explore_link_module, "validate_and_compile")
    @patch("superset.daos.dataset.DatasetDAO.find_by_id")
    @patch(
        "superset.mcp_service.commands.create_form_data.MCPCreateFormDataCommand.run"
    )
    @pytest.mark.asyncio
    async def test_validation_failure_returns_structured_error(
        self,
        mock_create_form_data,
        mock_find_dataset,
        mock_validate,
        mcp_server,
    ):
        """Non-existent column → structured ChartGenerationError with suggestions,
        and MCPCreateFormDataCommand must NOT be called (no cache write)."""
        from superset.mcp_service.chart.compile import CompileResult
        from superset.mcp_service.common.error_schemas import ChartGenerationError

        mock_find_dataset.return_value = _mock_dataset(id=3)
        # Force a Tier-1 validation failure from the (mocked) validator.
        mock_validate.return_value = CompileResult(
            success=False,
            error="Column 'num_boys' does not exist in dataset",
            error_code="CHART_VALIDATION_FAILED",
            tier="validation",
            error_obj=ChartGenerationError(
                error_type="invalid_column",
                message="Column 'num_boys' does not exist in dataset",
                details="Available columns: ds, gender, name, num, sum_boys",
                suggestions=["sum_boys"],
                error_code="CHART_VALIDATION_FAILED",
            ),
        )
        config = XYChartConfig(
            chart_type="xy",
            x=ColumnRef(name="ds"),
            y=[ColumnRef(name="num_boys", aggregate="SUM")],
            kind="line",
        )
        request = GenerateExploreLinkRequest(dataset_id="3", config=config)
        async with Client(mcp_server) as client:
            result = await client.call_tool(
                "generate_explore_link", {"request": request.model_dump()}
            )
        assert result.data["url"] == ""
        assert result.data["form_data_key"] is None
        error = result.data["error"]
        assert isinstance(error, dict)
        assert error["error_code"] == "CHART_VALIDATION_FAILED"
        assert "sum_boys" in error["suggestions"]
        # No cache write on validation failure.
        mock_create_form_data.assert_not_called()

    @patch.object(
        generate_explore_link_module, "has_dataset_access", return_value=False
    )
    @patch("superset.daos.dataset.DatasetDAO.find_by_id")
    @patch(
        "superset.mcp_service.commands.create_form_data.MCPCreateFormDataCommand.run"
    )
    @pytest.mark.asyncio
    async def test_dataset_access_denied_short_circuits(
        self,
        mock_create_form_data,
        mock_find_dataset,
        unused_access_mock,
        mcp_server,
    ):
        """has_dataset_access=False blocks the tool before any cache write."""
        mock_find_dataset.return_value = _mock_dataset(id=3)
        config = TableChartConfig(
            chart_type="table", columns=[ColumnRef(name="region")]
        )
        request = GenerateExploreLinkRequest(dataset_id="3", config=config)
        async with Client(mcp_server) as client:
            result = await client.call_tool(
                "generate_explore_link", {"request": request.model_dump()}
            )
        assert result.data["url"] == ""
        # Surface as "not found" rather than leaking that the dataset exists.
        assert "Dataset not found" in result.data["error"]
        mock_create_form_data.assert_not_called()

View File

@@ -396,127 +396,3 @@ def test_transpile_unknown_source_engine_uses_generic() -> None:
"SELECT * FROM orders", "postgresql", "unknown_engine"
)
assert result == "SELECT * FROM orders"
# Tests for identify=True (identifier quoting)
@pytest.mark.parametrize(
    "sql,dialect,expected",
    [
        # PostgreSQL / Snowflake: double-quoted identifiers, ILIKE is native.
        ("STATE ILIKE '%AL%'", "postgresql", "\"STATE\" ILIKE '%AL%'"),
        # MySQL / BigQuery: backtick-quoted identifiers, no ILIKE, so the
        # predicate is rewritten as LOWER(...) LIKE LOWER(...).
        ("STATE ILIKE '%AL%'", "mysql", "LOWER(`STATE`) LIKE LOWER('%AL%')"),
        ("STATE ILIKE '%AL%'", "bigquery", "LOWER(`STATE`) LIKE LOWER('%AL%')"),
        ("STATE ILIKE '%AL%'", "snowflake", "\"STATE\" ILIKE '%AL%'"),
        # MSSQL: bracket-quoted identifiers.
        ("STATE = 'CA'", "mssql", "[STATE] = 'CA'"),
        # Every identifier in a compound predicate gets quoted.
        (
            "STATE = 'CA' AND AIRLINE = 'Delta'",
            "postgresql",
            "\"STATE\" = 'CA' AND \"AIRLINE\" = 'Delta'",
        ),
        # Quoting is applied to lowercase names too.
        ("name = 'test'", "postgresql", "\"name\" = 'test'"),
    ],
)
def test_identify_quotes_identifiers(sql: str, dialect: str, expected: str) -> None:
    """identify=True must quote identifiers using the target dialect's style."""
    assert transpile_to_dialect(sql, dialect, identify=True) == expected
def test_identify_unknown_engine_returns_unchanged() -> None:
    """An unknown engine leaves the clause untouched even with identify=True."""
    clause = "STATE = 'CA'"
    assert transpile_to_dialect(clause, "unknown_engine", identify=True) == clause
@pytest.mark.parametrize(
    "sql,engine,expected",
    [
        ("STATE ILIKE '%AL%'", "postgresql", "\"STATE\" ILIKE '%AL%'"),
        (
            "country ILIKE '%Italy%'",
            "bigquery",
            "LOWER(`country`) LIKE LOWER('%Italy%')",
        ),
    ],
)
def test_identify_with_source_engine(sql: str, engine: str, expected: str) -> None:
    """identify=True still quotes when source and target engines coincide."""
    assert (
        transpile_to_dialect(sql, engine, source_engine=engine, identify=True)
        == expected
    )
@pytest.mark.parametrize(
    "engine",
    ["postgresql", "bigquery", "mysql", "snowflake"],
)
def test_identify_transpilation_is_idempotent(engine: str) -> None:
    """Transpiling a clause twice must yield the same SQL as transpiling once.

    This matters because _sanitize_filters() can be invoked multiple times
    via validate(), so a second pass over its own output must be a no-op.
    """
    first = transpile_to_dialect(
        "STATE ILIKE '%AL%'", engine, source_engine=engine, identify=True
    )
    second = transpile_to_dialect(first, engine, source_engine=engine, identify=True)
    assert second == first
def test_sanitize_filters_writes_back_transpiled_clause() -> None:
    """_sanitize_filters must always persist the transpiled clause.

    Regression test: a previous conditional `if sanitized_clause != clause`
    skipped the write-back when transpile_to_dialect had already modified
    the clause, leaving the original unquoted value in extras.
    """
    from unittest.mock import MagicMock

    from superset.common.query_object import QueryObject

    fake_datasource = MagicMock()
    fake_datasource.database.db_engine_spec.engine = "postgresql"
    query_obj = QueryObject(
        datasource=fake_datasource,
        columns=["STATE"],
        metrics=[],
        extras={"where": "STATE = 'CA'", "transpile_to_dialect": True},
    )
    query_obj.validate()
    # The Postgres-quoted identifier must have been written back into extras.
    assert '"STATE"' in query_obj.extras["where"]

View File

@@ -21,12 +21,7 @@ from flask_babel import lazy_gettext as _
from superset.commands.chart.exceptions import ChartDataQueryFailedError
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import (
OAuth2RedirectError,
SupersetErrorException,
SupersetErrorsException,
SupersetVizException,
)
from superset.exceptions import SupersetErrorException, SupersetErrorsException
@mock.patch("superset.tasks.async_queries.security_manager")
@@ -154,170 +149,3 @@ def test_load_chart_data_into_cache_with_superset_errors_exception(
assert errors[1]["message"] == "Table not found"
assert errors[1]["error_type"] == SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR
assert errors[1]["level"] == ErrorLevel.WARNING
@mock.patch("superset.tasks.async_queries.security_manager")
@mock.patch("superset.tasks.async_queries.async_query_manager")
@mock.patch("superset.tasks.async_queries.get_viz")
@mock.patch("superset.tasks.async_queries.get_datasource_info")
def test_load_explore_json_into_cache_preserves_oauth2_redirect_error(
    mock_get_datasource_info,
    mock_get_viz,
    mock_async_query_manager,
    mock_security_manager,
):
    """
    OAuth2RedirectError raised by ``viz_obj.get_payload`` must reach the async
    job's errors list as a structured SIP-40 envelope so the frontend can
    render the OAuth2 banner identically to the sync legacy path.
    """
    from superset.tasks.async_queries import load_explore_json_into_cache

    job_metadata = {"user_id": 1}
    form_data: dict = {}
    mock_get_datasource_info.return_value = (1, "table")
    mock_security_manager.get_user_by_id.return_value = mock.MagicMock()
    mock_async_query_manager.STATUS_ERROR = "error"

    redirect_exc = OAuth2RedirectError(
        url="https://accounts.example.com/o/oauth2/v2/auth?...",
        tab_id="tab-123",
        redirect_uri="https://superset.example.com/oauth2/redirect",
    )
    viz_obj = mock.MagicMock()
    viz_obj.get_payload.side_effect = redirect_exc
    mock_get_viz.return_value = viz_obj

    with pytest.raises(OAuth2RedirectError):
        load_explore_json_into_cache(job_metadata, form_data)

    # update_job must record the structured error before the exception
    # propagates out of the task.
    call_args = mock_async_query_manager.update_job.call_args
    assert call_args[0] == (job_metadata, "error")
    errors = call_args[1]["errors"]
    assert len(errors) == 1
    assert errors[0]["error_type"] == SupersetErrorType.OAUTH2_REDIRECT
    assert errors[0]["extra"] == {
        "url": "https://accounts.example.com/o/oauth2/v2/auth?...",
        "tab_id": "tab-123",
        "redirect_uri": "https://superset.example.com/oauth2/redirect",
    }
@mock.patch("superset.tasks.async_queries.security_manager")
@mock.patch("superset.tasks.async_queries.async_query_manager")
@mock.patch("superset.tasks.async_queries.get_viz")
@mock.patch("superset.tasks.async_queries.get_datasource_info")
def test_load_explore_json_into_cache_preserves_superset_errors_exception(
    mock_get_datasource_info,
    mock_get_viz,
    mock_async_query_manager,
    mock_security_manager,
):
    """SupersetErrorsException must be preserved as a list of SIP-40 dicts."""
    from superset.tasks.async_queries import load_explore_json_into_cache

    job_metadata = {"user_id": 1}
    form_data: dict = {}
    mock_get_datasource_info.return_value = (1, "table")
    mock_security_manager.get_user_by_id.return_value = mock.MagicMock()
    mock_async_query_manager.STATUS_ERROR = "error"

    raised_errors = [
        SupersetError(
            message="Column not found",
            error_type=SupersetErrorType.COLUMN_DOES_NOT_EXIST_ERROR,
            level=ErrorLevel.ERROR,
        ),
        SupersetError(
            message="Table not found",
            error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR,
            level=ErrorLevel.WARNING,
        ),
    ]
    viz_obj = mock.MagicMock()
    viz_obj.get_payload.side_effect = SupersetErrorsException(raised_errors)
    mock_get_viz.return_value = viz_obj

    with pytest.raises(SupersetErrorsException):
        load_explore_json_into_cache(job_metadata, form_data)

    errors = mock_async_query_manager.update_job.call_args[1]["errors"]
    assert len(errors) == 2
    assert errors[0]["error_type"] == SupersetErrorType.COLUMN_DOES_NOT_EXIST_ERROR
    assert errors[1]["error_type"] == SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR
@mock.patch("superset.tasks.async_queries.security_manager")
@mock.patch("superset.tasks.async_queries.async_query_manager")
@mock.patch("superset.tasks.async_queries.get_viz")
@mock.patch("superset.tasks.async_queries.get_datasource_info")
def test_load_explore_json_into_cache_preserves_superset_viz_exception(
    mock_get_datasource_info,
    mock_get_viz,
    mock_async_query_manager,
    mock_security_manager,
):
    """
    Test that SupersetVizException passes ``ex.errors`` straight through.
    """
    from superset.tasks.async_queries import load_explore_json_into_cache

    job_metadata = {"user_id": 1}
    form_data: dict = {}
    mock_get_datasource_info.return_value = (1, "table")
    mock_security_manager.get_user_by_id.return_value = mock.MagicMock()
    mock_async_query_manager.STATUS_ERROR = "error"

    payload_errors = [
        {
            "message": "Bad column",
            "error_type": SupersetErrorType.VIZ_GET_DF_ERROR,
            "level": ErrorLevel.ERROR,
        }
    ]
    # get_payload succeeds, but has_error() flips the viz into the error path.
    viz_obj = mock.MagicMock()
    viz_obj.get_payload.return_value = {"errors": payload_errors}
    viz_obj.has_error.return_value = True
    mock_get_viz.return_value = viz_obj

    with pytest.raises(SupersetVizException):
        load_explore_json_into_cache(job_metadata, form_data)

    errors = mock_async_query_manager.update_job.call_args[1]["errors"]
    assert errors == payload_errors
@mock.patch("superset.tasks.async_queries.security_manager")
@mock.patch("superset.tasks.async_queries.async_query_manager")
@mock.patch("superset.tasks.async_queries.get_viz")
@mock.patch("superset.tasks.async_queries.get_datasource_info")
def test_load_explore_json_into_cache_falls_back_to_string_for_generic_exception(
    mock_get_datasource_info,
    mock_get_viz,
    mock_async_query_manager,
    mock_security_manager,
):
    """
    Test that Non-Superset exception are passed as plain-string error.
    """
    from superset.tasks.async_queries import load_explore_json_into_cache

    job_metadata = {"user_id": 1}
    form_data: dict = {}
    mock_get_datasource_info.return_value = (1, "table")
    mock_security_manager.get_user_by_id.return_value = mock.MagicMock()
    mock_async_query_manager.STATUS_ERROR = "error"

    viz_obj = mock.MagicMock()
    viz_obj.get_payload.side_effect = RuntimeError("boom")
    mock_get_viz.return_value = viz_obj

    with pytest.raises(RuntimeError):
        load_explore_json_into_cache(job_metadata, form_data)

    # No SIP-40 structure for generic exceptions — just the message string.
    errors = mock_async_query_manager.update_job.call_args[1]["errors"]
    assert errors == ["boom"]

View File

@@ -1,111 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any
from unittest.mock import patch
import pytest
from superset import viz
from superset.common.db_query_status import QueryStatus
from superset.connectors.sqla.models import SqlaTable
from superset.errors import SupersetErrorType
from superset.exceptions import (
OAuth2RedirectError,
QueryObjectValidationError,
)
from superset.models.core import Database
# Minimal query-object dict accepted by BaseViz.get_df_payload in these tests.
QUERY_OBJ: dict[str, Any] = {"row_limit": 100, "from_dttm": None, "to_dttm": None}
def _viz() -> viz.BaseViz:
    """Build a minimal BaseViz backed by an empty in-memory SQLite table."""
    db = Database(database_name="d", sqlalchemy_uri="sqlite://")
    table = SqlaTable(
        table_name="t",
        columns=[],
        metrics=[],
        main_dttm_col=None,
        database=db,
    )
    # ``force=True`` skips the data cache lookup branch so ``get_df`` is always
    # invoked, which is what we want to assert error-handling against.
    return viz.BaseViz(
        datasource=table,
        form_data={"viz_type": "table"},
        force=True,
    )
def test_get_df_payload_propagates_oauth2_redirect_error() -> None:
    """
    OAuth2RedirectError (a SupersetErrorException) must propagate out of
    ``get_df_payload`` so the global Flask error handler can serialize it.
    """
    obj = _viz()
    redirect_exc = OAuth2RedirectError(
        url="https://accounts.example.com/o/oauth2/v2/auth?...",
        tab_id="tab-123",
        redirect_uri="https://superset.example.com/oauth2/redirect",
    )
    with patch.object(viz.BaseViz, "get_df", side_effect=redirect_exc):
        with pytest.raises(OAuth2RedirectError) as exc_info:
            obj.get_df_payload(QUERY_OBJ)

    # The SIP-40 envelope must survive the round trip intact.
    assert exc_info.value.error.error_type == SupersetErrorType.OAUTH2_REDIRECT
    assert exc_info.value.error.extra == {
        "url": "https://accounts.example.com/o/oauth2/v2/auth?...",
        "tab_id": "tab-123",
        "redirect_uri": "https://superset.example.com/oauth2/redirect",
    }
def test_get_df_payload_captures_generic_exception_as_viz_get_df_error() -> None:
    """
    Non-Superset exception raised by ``get_df`` are downgraded to a
    ``VIZ_GET_DF_ERROR`` entry on ``self.errors``.
    """
    obj = _viz()
    with patch.object(viz.BaseViz, "get_df", side_effect=RuntimeError("boom")):
        payload = obj.get_df_payload(QUERY_OBJ)

    assert obj.status == QueryStatus.FAILED
    assert payload["status"] == QueryStatus.FAILED
    # Exactly one structured error, carrying the original message.
    assert len(obj.errors) == 1
    assert obj.errors[0]["error_type"] == SupersetErrorType.VIZ_GET_DF_ERROR
    assert obj.errors[0]["message"] == "boom"
def test_get_df_payload_captures_query_object_validation_error() -> None:
    """
    ``QueryObjectValidationError`` is reported as ``VIZ_GET_DF_ERROR``.
    """
    obj = _viz()
    validation_exc = QueryObjectValidationError("bad query")
    with patch.object(viz.BaseViz, "get_df", side_effect=validation_exc):
        payload = obj.get_df_payload(QUERY_OBJ)

    assert obj.status == QueryStatus.FAILED
    assert payload["status"] == QueryStatus.FAILED
    assert len(obj.errors) == 1
    assert obj.errors[0]["error_type"] == SupersetErrorType.VIZ_GET_DF_ERROR
    assert obj.errors[0]["message"] == "bad query"