Compare commits

...

8 Commits

Author SHA1 Message Date
Mehmet Salih Yavuz
a2dfaa873e fix(mcp): trim find_users response to a minimal projection
Address review feedback on the find_users tool: the response previously
serialized the full UserInfo (id, username, first/last name, email, active,
roles), exposing more identity data than disambiguating a filter target
requires. Replace with a UserMatch schema that returns only id, username,
first_name, and last_name. Email, active flag, and roles are no longer
returned by this directory-resolution path.
2026-04-27 17:57:28 +03:00
Mehmet Salih Yavuz
f13e24a6df fix(mcp): reject whitespace-only find_users query and fix workflow example
Address two valid review findings on the find_users / people-filter feature:

- A whitespace-only query (e.g. "   ") passed FindUsersRequest's min_length=1
  but stripped to "" in the LIKE predicate, producing "%%" and enumerating
  every user — contradicting the tool's documented "no full directory
  enumeration" guarantee. Add a field validator that strips and rejects blank.
- The "show me what <name> is working on" workflow example in
  DEFAULT_INSTRUCTIONS omitted the request={...} wrapper on step 3, which
  every other example in the same block uses; LLMs following it verbatim
  hit pydantic validation errors. Wrap it.
2026-04-27 13:39:26 +03:00
Mehmet Salih Yavuz
534b5aa799 test(mcp): patch find_users via patch.object to fix CI flake
The dotted-string patch targets resolved to the find_users function
(re-exported from tool/__init__.py) instead of the module under some CI
import orderings, raising AttributeError. Use importlib.import_module +
patch.object to pin the patch site to the module.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-27 13:36:10 +03:00
Mehmet Salih Yavuz
6275159eb4 fix(mcp): resolve pyright type issues from people-filter change
Pyright caught real type problems pre-commit/mypy missed in the
find_users -> created_by_fk filter change. Narrow Pydantic field
overrides, cast filter validator returns to the concrete subclass,
make optional model fields explicit (default=...) so constructor
calls type-check, and narrow MCP tool result content to TextContent
in test helpers.
2026-04-27 12:39:14 +03:00
Mehmet Salih Yavuz
c54f77a0b2 fix(mcp): let MCP clients filter dashboards/charts/datasets by person
Without a way to map a person's name to a user ID, LLMs answering "show me
what <person> is working on" fall back to the free-text search parameter,
which only matches titles/slugs and silently returns no useful results.

Add a find_users MCP tool that resolves a name (or partial name, username,
or email) to UserInfo entries, and expose created_by_fk / changed_by_fk as
filter columns on list_dashboards, list_charts, and list_datasets. The
LLM's flow: find_users("Maxime") -> user.id -> list_dashboards(filters=
[{"col": "created_by_fk", "opr": "eq", "value": <id>}]).

User-directory fields stay stripped from select_columns, sortable_columns,
search_columns, and tool responses, so owner names are still never exposed
through listing endpoints. Updated server instructions and per-tool
docstrings explain the find_users -> filter workflow and reaffirm that
find_users is sanctioned only for filter-value resolution, not for
"who owns / who can access" queries.
2026-04-27 12:20:30 +03:00
Alejandro Solares
6ad1583eb5 fix(security): bump authlib to 1.6.9 (#39598)
Co-authored-by: Đỗ Trọng Hải <41283691+hainenber@users.noreply.github.com>
2026-04-26 11:56:09 +07:00
dependabot[bot]
9a7938899e chore(deps): bump yargs from 17.7.2 to 18.0.0 in /superset-frontend (#36584)
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: hainenber <dotronghai96@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: hainenber <dotronghai96@gmail.com>
2026-04-26 11:50:07 +07:00
Haoqian Zhang
30bd490b84 fix: delete Chart under "All" in home page doesn't refresh after dele… (#39471) 2026-04-26 00:21:12 +02:00
27 changed files with 974 additions and 89 deletions

View File

@@ -52,7 +52,7 @@ attrs==25.3.0
# referencing
# requests-cache
# trio
authlib==1.6.7
authlib==1.6.9
# via fastmcp
babel==2.17.0
# via

View File

@@ -151,7 +151,7 @@
"use-query-params": "^2.2.2",
"uuid": "^14.0.0",
"xlsx": "https://cdn.sheetjs.com/xlsx-0.20.3/xlsx-0.20.3.tgz",
"yargs": "^17.7.2"
"yargs": "^18.0.0"
},
"devDependencies": {
"@babel/cli": "^7.28.6",
@@ -12769,6 +12769,25 @@
"dev": true,
"license": "ISC"
},
"node_modules/@storybook/test-runner/node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
"integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
"dev": true,
"license": "MIT",
"dependencies": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/@storybook/test-runner/node_modules/yargs-parser": {
"version": "18.1.3",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz",
@@ -12793,6 +12812,59 @@
"node": ">=6"
}
},
"node_modules/@storybook/test-runner/node_modules/yargs/node_modules/cliui": {
"version": "8.0.1",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
"integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
"dev": true,
"license": "ISC",
"dependencies": {
"string-width": "^4.2.0",
"strip-ansi": "^6.0.1",
"wrap-ansi": "^7.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/@storybook/test-runner/node_modules/yargs/node_modules/wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/@storybook/test-runner/node_modules/yargs/node_modules/y18n": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
"integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
"dev": true,
"license": "ISC",
"engines": {
"node": ">=10"
}
},
"node_modules/@storybook/test-runner/node_modules/yargs/node_modules/yargs-parser": {
"version": "21.1.1",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
"integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
"dev": true,
"license": "ISC",
"engines": {
"node": ">=12"
}
},
"node_modules/@storybook/test/node_modules/@storybook/instrumenter": {
"version": "8.6.18",
"resolved": "https://registry.npmjs.org/@storybook/instrumenter/-/instrumenter-8.6.18.tgz",
@@ -20396,6 +20468,25 @@
"url": "https://github.com/chalk/supports-color?sponsor=1"
}
},
"node_modules/concurrently/node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
"integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
"dev": true,
"license": "MIT",
"dependencies": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/config-chain": {
"version": "1.1.13",
"resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz",
@@ -27011,7 +27102,6 @@
"resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.3.0.tgz",
"integrity": "sha512-vpeMIQKxczTD/0s2CdEWHcb0eeJe6TFjxb+J5xgX7hScxqrGuyjmv4c1D4A/gelKfyox0gJJwIHF+fLjeaM8kQ==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=18"
},
@@ -30709,6 +30799,25 @@
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-cli/node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
"integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
"dev": true,
"license": "MIT",
"dependencies": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/jest-config": {
"version": "30.3.0",
"resolved": "https://registry.npmjs.org/jest-config/-/jest-config-30.3.0.tgz",
@@ -34960,6 +35069,25 @@
"dev": true,
"license": "ISC"
},
"node_modules/lerna/node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
"integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
"dev": true,
"license": "MIT",
"dependencies": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/leven": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
@@ -37901,6 +38029,25 @@
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/nx/node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
"integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
"dev": true,
"license": "MIT",
"dependencies": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/nyc": {
"version": "17.1.0",
"resolved": "https://registry.npmjs.org/nyc/-/nyc-17.1.0.tgz",
@@ -47947,6 +48094,24 @@
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"license": "MIT"
},
"node_modules/typescript-json-schema/node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
"integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
"license": "MIT",
"dependencies": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/typewise": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/typewise/-/typewise-1.0.3.tgz",
@@ -50364,21 +50529,20 @@
}
},
"node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
"integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
"version": "18.0.0",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-18.0.0.tgz",
"integrity": "sha512-4UEqdc2RYGHZc7Doyqkrqiln3p9X2DZVxaGbwhn2pi7MrRagKaOcIKe8L3OxYcbhXLgLFUS3zAYuQjKBQgmuNg==",
"license": "MIT",
"dependencies": {
"cliui": "^8.0.1",
"cliui": "^9.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"string-width": "^7.2.0",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
"yargs-parser": "^22.0.0"
},
"engines": {
"node": ">=12"
"node": "^20.19.0 || ^22.12.0 || >=23"
}
},
"node_modules/yargs-parser": {
@@ -50390,6 +50554,108 @@
"node": ">=12"
}
},
"node_modules/yargs/node_modules/ansi-regex": {
"version": "6.2.2",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
"integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/yargs/node_modules/ansi-styles": {
"version": "6.2.3",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
"integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/yargs/node_modules/cliui": {
"version": "9.0.1",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-9.0.1.tgz",
"integrity": "sha512-k7ndgKhwoQveBL+/1tqGJYNz097I7WOvwbmmU2AR5+magtbjPWQTS1C5vzGkBC8Ym8UWRzfKUzUUqFLypY4Q+w==",
"license": "ISC",
"dependencies": {
"string-width": "^7.2.0",
"strip-ansi": "^7.1.0",
"wrap-ansi": "^9.0.0"
},
"engines": {
"node": ">=20"
}
},
"node_modules/yargs/node_modules/emoji-regex": {
"version": "10.6.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz",
"integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==",
"license": "MIT"
},
"node_modules/yargs/node_modules/string-width": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
"integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
"license": "MIT",
"dependencies": {
"emoji-regex": "^10.3.0",
"get-east-asian-width": "^1.0.0",
"strip-ansi": "^7.1.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/yargs/node_modules/strip-ansi": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz",
"integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==",
"license": "MIT",
"dependencies": {
"ansi-regex": "^6.2.2"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/yargs/node_modules/wrap-ansi": {
"version": "9.0.2",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz",
"integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==",
"license": "MIT",
"dependencies": {
"ansi-styles": "^6.2.1",
"string-width": "^7.0.0",
"strip-ansi": "^7.1.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/yargs/node_modules/yargs-parser": {
"version": "22.0.0",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-22.0.0.tgz",
"integrity": "sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw==",
"license": "ISC",
"engines": {
"node": "^20.19.0 || ^22.12.0 || >=23"
}
},
"node_modules/yauzl": {
"version": "2.10.0",
"resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz",
@@ -53085,7 +53351,7 @@
"license": "Apache-2.0",
"dependencies": {
"@types/d3-scale": "^4.0.9",
"d3-cloud": "^1.2.8",
"d3-cloud": "^1.2.9",
"d3-scale": "^4.0.2"
},
"devDependencies": {

View File

@@ -232,7 +232,7 @@
"use-query-params": "^2.2.2",
"uuid": "^14.0.0",
"xlsx": "https://cdn.sheetjs.com/xlsx-0.20.3/xlsx-0.20.3.tgz",
"yargs": "^17.7.2"
"yargs": "^18.0.0"
},
"devDependencies": {
"@babel/cli": "^7.28.6",

View File

@@ -27,10 +27,11 @@ process.env.PATH = `./node_modules/.bin:${process.env.PATH}`;
const { spawnSync } = require('child_process');
const fastGlob = require('fast-glob');
const { argv } = require('yargs');
const yargs = require('yargs');
const { hideBin } = require('yargs/helpers');
const { _: globs } = argv;
const glob = globs.length > 1 ? `{${globs.join(',')}}` : globs[0] || '*';
const { globs } = yargs(hideBin(process.argv)).parse();
const glob = globs?.length > 1 ? `{${globs.join(',')}}` : globs?.[0] || '*';
const BABEL_CONFIG = '--config-file=../../babel.config.js';

View File

@@ -34,6 +34,8 @@ import Chart from 'src/types/Chart';
import { FacePile } from 'src/components';
import { handleChartDelete, CardStyles } from 'src/views/CRUD/utils';
import { assetUrl } from 'src/utils/assetUrl';
import type { ListViewFetchDataConfig as FetchDataConfig } from 'src/components';
import { TableTab } from 'src/views/CRUD/types';
interface ChartCardProps {
chart: Chart;
@@ -42,7 +44,7 @@ interface ChartCardProps {
bulkSelectEnabled: boolean;
addDangerToast: (msg: string) => void;
addSuccessToast: (msg: string) => void;
refreshData: () => void;
refreshData: (config?: FetchDataConfig | null) => void;
loading?: boolean;
saveFavoriteStatus: (id: number, isStarred: boolean) => void;
favoriteStatus: boolean;
@@ -50,6 +52,7 @@ interface ChartCardProps {
userId?: string | number;
showThumbnails?: boolean;
handleBulkChartExport: (chartsToExport: Chart[]) => void;
getData?: (tab: TableTab) => void;
}
export default function ChartCard({
@@ -67,6 +70,7 @@ export default function ChartCard({
chartFilter,
userId,
handleBulkChartExport,
getData,
}: ChartCardProps) {
const history = useHistory();
const canEdit = hasPerm('can_write');
@@ -136,6 +140,7 @@ export default function ChartCard({
refreshData,
chartFilter,
userId,
getData,
)
}
>

View File

@@ -26,6 +26,7 @@ import { VizType } from '@superset-ui/core';
import fetchMock from 'fetch-mock';
import { act } from 'react-dom/test-utils';
import handleResourceExport from 'src/utils/export';
import { LocalStorageKeys } from 'src/utils/localStorageHelpers';
import ChartTable from './ChartTable';
// Mock the export module
@@ -53,12 +54,16 @@ const mockCharts = Array.from({ length: 3 }).map((_, i) => ({
thumbnail_url: '',
}));
fetchMock.get(chartsEndpoint, {
result: mockCharts,
});
fetchMock.get(
chartsEndpoint,
{
result: mockCharts,
},
{ name: chartsEndpoint },
);
fetchMock.get(chartsInfoEndpoint, {
permissions: ['can_add', 'can_edit', 'can_delete', 'can_export'],
permissions: ['can_add', 'can_write', 'can_delete', 'can_export'],
});
fetchMock.get(chartFavoriteStatusEndpoint, {
@@ -99,6 +104,10 @@ const renderChartTable = (props: any) =>
render(<ChartTable {...props} />, renderOptions);
});
beforeEach(() => {
window.localStorage.removeItem(LocalStorageKeys.HomepageChartFilter);
});
test('renders with EmptyState if no data present', async () => {
await renderChartTable(mockedProps);
expect(screen.getAllByRole('tab')).toHaveLength(3);
@@ -178,3 +187,58 @@ test('handles chart export with correct ID and shows spinner', async () => {
{ timeout: 3000 },
);
});
test('refreshes other tab data after deleting a chart', async () => {
fetchMock.removeRoute(chartsEndpoint);
fetchMock.get(
chartsEndpoint,
{
result: mockCharts.slice(1),
count: mockCharts.length - 1,
},
{ name: chartsEndpoint },
);
fetchMock.delete('glob:*/api/v1/chart/0', {
message: 'Chart deleted',
});
await renderChartTable({
...otherTabProps,
otherTabTitle: 'All',
});
expect(screen.getByText('cool chart 0')).toBeInTheDocument();
const refreshCallsBeforeDelete =
fetchMock.callHistory.calls(chartsEndpoint).length;
const moreButtons = screen.getAllByRole('img', { name: /more/i });
await userEvent.click(moreButtons[0]);
await userEvent.click(await screen.findByText('Delete'));
const deleteInput = screen.getByTestId('delete-modal-input');
await userEvent.type(deleteInput, 'DELETE');
await userEvent.click(screen.getByTestId('modal-confirm-button'));
await waitFor(() => {
expect(
fetchMock.callHistory.calls(/api\/v1\/chart\/0/, {
method: 'DELETE',
}),
).toHaveLength(1);
});
await waitFor(() => {
expect(fetchMock.callHistory.calls(chartsEndpoint).length).toBe(
refreshCallsBeforeDelete + 1,
);
});
await waitFor(() => {
expect(screen.queryByText('cool chart 0')).not.toBeInTheDocument();
});
expect(screen.getByText('cool chart 1')).toBeInTheDocument();
expect(screen.getByText('cool chart 2')).toBeInTheDocument();
});

View File

@@ -112,18 +112,19 @@ function ChartTable({
const [preparingExport, setPreparingExport] = useState<boolean>(false);
const [loaded, setLoaded] = useState<boolean>(false);
const getData = (tab: TableTab) =>
fetchData({
pageIndex: 0,
pageSize: PAGE_SIZE,
sortBy: [
{
id: 'changed_on_delta_humanized',
desc: true,
},
],
filters: getFilterValues(tab, WelcomeTable.Charts, user, otherTabFilters),
});
const getChartFetchDataConfig = (tab: TableTab) => ({
pageIndex: 0,
pageSize: PAGE_SIZE,
sortBy: [
{
id: 'changed_on_delta_humanized',
desc: true,
},
],
filters: getFilterValues(tab, WelcomeTable.Charts, user, otherTabFilters),
});
const getData = (tab: TableTab) => fetchData(getChartFetchDataConfig(tab));
useEffect(() => {
if (loaded || activeTab === TableTab.Favorite) {
@@ -234,6 +235,7 @@ function ChartTable({
refreshData={refreshData}
addDangerToast={addDangerToast}
addSuccessToast={addSuccessToast}
getData={getData}
favoriteStatus={favoriteStatus[e.id]}
saveFavoriteStatus={saveFavoriteStatus}
handleBulkChartExport={handleBulkChartExport}

View File

@@ -327,6 +327,7 @@ export function handleChartDelete(
refreshData: (arg0?: FetchDataConfig | null) => void,
chartFilter?: string,
userId?: string | number,
getData?: (tab: TableTab) => void,
) {
const filters = {
pageIndex: 0,
@@ -350,6 +351,7 @@ export function handleChartDelete(
}).then(
() => {
if (chartFilter === 'Mine') refreshData(filters);
else if (chartFilter && getData) getData(chartFilter as TableTab);
else refreshData();
addSuccessToast(t('Deleted: %s', sliceName));
},

View File

@@ -36,11 +36,14 @@ const {
} = require('webpack-manifest-plugin');
const ForkTsCheckerWebpackPlugin = require('fork-ts-checker-webpack-plugin');
const ReactRefreshWebpackPlugin = require('@pmmmwh/react-refresh-webpack-plugin');
const parsedArgs = require('yargs').argv;
const yargs = require('yargs');
const { hideBin } = require('yargs/helpers');
const Visualizer = require('webpack-visualizer-plugin2');
const getProxyConfig = require('./webpack.proxy-config');
const packageConfig = require('./package.json');
const parsedArgs = yargs(hideBin(process.argv)).parse();
// input dir
const APP_DIR = path.resolve(__dirname, './');
// output dir

View File

@@ -20,8 +20,8 @@ const zlib = require('zlib');
const { ZSTDDecompress } = require('simple-zstd');
const yargs = require('yargs');
// eslint-disable-next-line import/no-extraneous-dependencies
const parsedArgs = yargs.argv;
const { hideBin } = require('yargs/helpers');
const parsedArgs = yargs(hideBin(process.argv)).parse();
const parsedEnvArg = () => {
let envArgs = {};

View File

@@ -84,6 +84,7 @@ Schema Discovery:
System Information:
- get_instance_info: Get instance-wide statistics, metadata, and current user identity
- find_users: Resolve a person's name to user IDs for use as a filter value
- health_check: Simple health check tool (takes NO parameters, call without arguments)
Available Resources:
@@ -123,6 +124,16 @@ Some tools do not use a request wrapper, so follow each tool's schema
Recommended Workflows:
To filter dashboards/charts/datasets by a person ("show me what <name> is working on"):
1. find_users(request={{"query": "<name>"}}) -> resolve to user IDs
2. Pick the matching user.id from the response
3. list_dashboards(request={{"filters": [
{{"col": "created_by_fk", "opr": "eq", "value": <id>}}
]}}) — same shape for list_charts / list_datasets.
(use changed_by_fk for "last modified by", or "in" with a list of IDs for
multiple matches). Do NOT pass the person's name as the search parameter —
search matches titles, not people.
To add a chart to an existing dashboard:
1. add_chart_to_existing_dashboard(dashboard_id, chart_id) -> updates dashboard directly
- If permission_denied=True is returned: inform the user they lack edit rights,
@@ -263,6 +274,11 @@ Permission Awareness:
contact details, roles, admin status, ownership, or access-list information.
- Do NOT infer access-list answers from dashboard metadata such as published status,
role restrictions, empty owner lists, or schema fields.
- find_users is sanctioned ONLY for resolving a name the user supplied into a
user ID for filtering (e.g., "what is <name> working on" -> filter
list_dashboards by created_by_fk). Do NOT use find_users to answer "who owns
X", "who can access X", "is <name> an admin", or to enumerate the directory.
Never return find_users output to the user verbatim.
- Do NOT use execute_sql to query user, role, owner, or access-list tables for this
information.
- You may reference the current user's own identity details when appropriate, such
@@ -509,6 +525,7 @@ from superset.mcp_service.system import ( # noqa: F401, E402
resources as system_resources,
)
from superset.mcp_service.system.tool import ( # noqa: F401, E402
find_users,
get_instance_info,
get_schema,
health_check,

View File

@@ -23,7 +23,7 @@ from __future__ import annotations
import difflib
from datetime import datetime, timezone
from typing import Annotated, Any, Dict, List, Literal, Protocol
from typing import Annotated, Any, cast, Dict, List, Literal, Protocol
import humanize
from pydantic import (
@@ -141,7 +141,7 @@ class ChartInfo(BaseModel):
),
)
form_data_key: str | None = Field(
None,
default=None,
description=(
"Cache key used to retrieve unsaved form_data. When present, indicates "
"the form_data came from cache (unsaved edits) rather than the saved chart."
@@ -435,14 +435,18 @@ class ChartFilter(ColumnOperator):
value: The value to filter by (type depends on col and opr).
"""
col: Literal[
col: Literal[ # pyright: ignore[reportIncompatibleVariableOverride]
"slice_name",
"viz_type",
"datasource_name",
"created_by_fk",
"changed_by_fk",
] = Field(
...,
description="Column to filter on. Use get_schema(model_type='chart') for "
"available filter columns.",
"available filter columns. To filter by a person, first call find_users "
"to resolve a name to a user ID, then filter by created_by_fk or "
"changed_by_fk with that integer ID.",
)
opr: ColumnOperatorEnum = Field(
...,
@@ -1351,7 +1355,10 @@ class ListChartsRequest(MetadataCacheControl):
"""
from superset.mcp_service.utils.schema_utils import parse_json_or_model_list
return parse_json_or_model_list(v, ChartFilter, "filters")
return cast(
List[ChartFilter],
parse_json_or_model_list(v, ChartFilter, "filters"),
)
@field_validator("select_columns", mode="before")
@classmethod

View File

@@ -90,6 +90,10 @@ async def list_charts(
Sortable columns for order_column: id, slice_name, viz_type, description,
changed_on, created_on
To filter by a person, call find_users to resolve the name to a user ID,
then pass it as a filter: filters=[{"col": "created_by_fk", "opr": "eq",
"value": <id>}] (or "changed_by_fk"). Do not pass the name as search.
"""
await ctx.info(
"Listing charts: page=%s, page_size=%s, search=%s"

View File

@@ -37,10 +37,12 @@ class ColumnMetadata(BaseModel):
"""Metadata for a selectable column."""
name: str = Field(..., description="Column name to use in select_columns")
description: str | None = Field(None, description="Column description")
type: str | None = Field(None, description="Data type (str, int, datetime, etc.)")
description: str | None = Field(default=None, description="Column description")
type: str | None = Field(
default=None, description="Data type (str, int, datetime, etc.)"
)
is_default: bool = Field(
False, description="Whether this column is included by default"
default=False, description="Whether this column is included by default"
)

View File

@@ -67,7 +67,7 @@ from __future__ import annotations
import logging
from datetime import datetime
from typing import Annotated, Any, Dict, List, Literal, TYPE_CHECKING
from typing import Annotated, Any, cast, Dict, List, Literal, TYPE_CHECKING
import humanize
from pydantic import (
@@ -155,16 +155,20 @@ class DashboardFilter(ColumnOperator):
value: The value to filter by (type depends on col and opr).
"""
col: Literal[
col: Literal[ # pyright: ignore[reportIncompatibleVariableOverride]
"dashboard_title",
"published",
"favorite",
"created_by_fk",
"changed_by_fk",
] = Field(
...,
description=(
"Column to filter on. Use "
"get_schema(model_type='dashboard') for available "
"filter columns."
"filter columns. To filter by a person, first call find_users to "
"resolve a name to a user ID, then filter by created_by_fk or "
"changed_by_fk with that integer ID."
),
)
opr: ColumnOperatorEnum = Field(
@@ -209,7 +213,10 @@ class ListDashboardsRequest(MetadataCacheControl):
"""
from superset.mcp_service.utils.schema_utils import parse_json_or_model_list
return parse_json_or_model_list(v, DashboardFilter, "filters")
return cast(
List[DashboardFilter],
parse_json_or_model_list(v, DashboardFilter, "filters"),
)
@field_validator("select_columns", mode="before")
@classmethod
@@ -379,14 +386,14 @@ class DashboardInfo(BaseModel):
# Fields for permalink/filter state support
permalink_key: str | None = Field(
None,
default=None,
description=(
"Permalink key used to retrieve filter state. When present, indicates "
"the filter_state came from a permalink rather than the default dashboard."
),
)
filter_state: Dict[str, Any] | None = Field(
None,
default=None,
description=(
"Filter state from permalink. Contains dataMask (native filter values), "
"activeTabs, anchor, and urlParams. When present, represents the actual "

View File

@@ -84,6 +84,12 @@ async def list_dashboards(
Sortable columns for order_column: id, dashboard_title, slug, published,
changed_on, created_on
To filter by a person (e.g. "dashboards Maxime is working on"), do NOT pass
the name as the search parameter — search matches titles and slugs only.
Instead, call find_users to resolve the name to a user ID, then pass it as
a filter: filters=[{"col": "created_by_fk", "opr": "eq", "value": <id>}]
(or "changed_by_fk" for "last modified by").
"""
await ctx.info(
"Listing dashboards: page=%s, page_size=%s, search=%s"

View File

@@ -22,7 +22,7 @@ Pydantic schemas for database-related responses
from __future__ import annotations
from datetime import datetime
from typing import Annotated, Any, Dict, List, Literal
from typing import Annotated, Any, cast, Dict, List, Literal
import humanize
from pydantic import (
@@ -55,7 +55,7 @@ class DatabaseFilter(ColumnOperator):
value: The value to filter by (type depends on col and opr).
"""
col: Literal[
col: Literal[ # pyright: ignore[reportIncompatibleVariableOverride]
"database_name",
"expose_in_sqllab",
"allow_file_upload",
@@ -239,7 +239,10 @@ class ListDatabasesRequest(MetadataCacheControl):
@classmethod
def parse_filters(cls, v: Any) -> List[DatabaseFilter]:
"""Accept both JSON string and list of objects."""
return parse_json_or_model_list(v, DatabaseFilter, "filters")
return cast(
List[DatabaseFilter],
parse_json_or_model_list(v, DatabaseFilter, "filters"),
)
@field_validator("select_columns", mode="before")
@classmethod

View File

@@ -54,14 +54,18 @@ class DatasetFilter(ColumnOperator):
value: The value to filter by (type depends on col and opr).
"""
col: Literal[
col: Literal[ # pyright: ignore[reportIncompatibleVariableOverride]
"table_name",
"schema",
"database_name",
"created_by_fk",
"changed_by_fk",
] = Field(
...,
description="Column to filter on. Use get_schema(model_type='dataset') for "
"available filter columns.",
"available filter columns. To filter by a person, first call find_users "
"to resolve a name to a user ID, then filter by created_by_fk or "
"changed_by_fk with that integer ID.",
)
opr: ColumnOperatorEnum = Field(
...,
@@ -415,7 +419,7 @@ def serialize_dataset_object(dataset: Any) -> DatasetInfo | None:
params = None
columns = [
TableColumnInfo(
column_name=getattr(col, "column_name", None),
column_name=getattr(col, "column_name", None) or "",
verbose_name=getattr(col, "verbose_name", None),
type=getattr(col, "type", None),
is_dttm=getattr(col, "is_dttm", None),
@@ -427,7 +431,7 @@ def serialize_dataset_object(dataset: Any) -> DatasetInfo | None:
]
metrics = [
SqlMetricInfo(
metric_name=getattr(metric, "metric_name", None),
metric_name=getattr(metric, "metric_name", None) or "",
verbose_name=getattr(metric, "verbose_name", None),
expression=getattr(metric, "expression", None),
description=getattr(metric, "description", None),
@@ -438,7 +442,7 @@ def serialize_dataset_object(dataset: Any) -> DatasetInfo | None:
return DatasetInfo(
id=getattr(dataset, "id", None),
table_name=getattr(dataset, "table_name", None),
schema_name=getattr(dataset, "schema", None),
schema=getattr(dataset, "schema", None),
database_name=getattr(dataset.database, "database_name", None)
if getattr(dataset, "database", None)
else None,

View File

@@ -98,6 +98,10 @@ async def list_datasets(
Sortable columns for order_column: id, table_name, schema, changed_on,
created_on
To filter by a person, call find_users to resolve the name to a user ID,
then pass it as a filter: filters=[{"col": "created_by_fk", "opr": "eq",
"value": <id>}] (or "changed_by_fk"). Do not pass the name as search.
"""
if ctx is None:
raise RuntimeError("FastMCP context is required for list_datasets")

View File

@@ -29,6 +29,7 @@ from superset.mcp_service.constants import ModelType
from superset.mcp_service.privacy import (
filter_user_directory_columns,
USER_DIRECTORY_FIELDS,
USER_FILTER_FIELDS,
)
from superset.mcp_service.utils import _is_uuid
@@ -245,14 +246,6 @@ class ModelListCore(BaseCore, Generic[L]):
has_previous=page > 0,
)
# Build response
def get_keys(obj: BaseModel | dict[str, Any] | Any) -> List[str]:
if hasattr(obj, "model_dump"):
return list(obj.model_dump().keys())
elif isinstance(obj, dict):
return list(obj.keys())
return []
response_kwargs = {
self.list_field_name: item_objs,
"count": len(item_objs),
@@ -425,7 +418,7 @@ class InstanceInfoCore(BaseCore):
return counts
def _calculate_time_based_metrics(
self, base_counts: Dict[str, int]
self, _base_counts: Dict[str, int]
) -> Dict[str, Dict[str, int]]:
"""Calculate time-based metrics for recent activity."""
from datetime import datetime, timedelta, timezone
@@ -612,7 +605,9 @@ class ModelGetSchemaCore(BaseCore, Generic[S]):
self.default_sort = default_sort
self.default_sort_direction = default_sort_direction
self.exclude_filter_columns = set(exclude_filter_columns or set())
self.exclude_filter_columns.update(USER_DIRECTORY_FIELDS)
# Hide user-directory columns from filter discovery, except the small
# set callers may legitimately filter by ID (resolved via find_users).
self.exclude_filter_columns.update(USER_DIRECTORY_FIELDS - USER_FILTER_FIELDS)
def _get_filter_columns(self) -> Dict[str, List[str]]:
"""Get filterable columns and operators from the DAO."""

View File

@@ -44,6 +44,12 @@ USER_DIRECTORY_FIELDS = frozenset(
}
)
# User-directory columns that may be used as filter values (an integer user ID).
# These remain stripped from select_columns, sort, search, and tool responses
# (so the directory itself is never exposed), but list tools may filter rows by
# them when the caller already has an ID — typically resolved via find_users.
USER_FILTER_FIELDS = frozenset(("created_by_fk", "changed_by_fk"))
DATA_MODEL_METADATA_ACCESS_ATTR = "_requires_data_model_metadata_access"
DATA_MODEL_METADATA_ERROR_TYPE = "DataModelMetadataRestricted"
DATA_MODEL_METADATA_PRIVACY_SCOPE = "data_model"

View File

@@ -25,9 +25,11 @@ system-level info.
from __future__ import annotations
from datetime import datetime
from typing import Any, Dict, List
from typing import Annotated, Any, Dict, List
from pydantic import BaseModel, ConfigDict, Field
from pydantic import BaseModel, ConfigDict, Field, field_validator
from superset.mcp_service.constants import DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE
class HealthCheckResponse(BaseModel):
@@ -170,6 +172,84 @@ def serialize_user_object(user: Any) -> UserInfo | None:
)
class FindUsersRequest(BaseModel):
    """Request schema for find_users tool.

    Resolves a person's name (or partial name, username, or email) to user IDs
    so they can be passed to listing tools as filter values for created_by_fk
    or changed_by_fk. This is the only sanctioned path for "show me what
    <person> is working on" queries.
    """

    model_config = ConfigDict(extra="forbid")

    query: str = Field(
        ...,
        min_length=1,
        max_length=200,
        description=(
            "Substring to match (case-insensitive) against username, "
            "first_name, last_name, and email. Required and non-empty: "
            "this tool does not enumerate the full user directory."
        ),
    )
    page_size: int = Field(
        default=DEFAULT_PAGE_SIZE,
        gt=0,
        le=MAX_PAGE_SIZE,
        description=f"Maximum number of matches to return (max {MAX_PAGE_SIZE}).",
    )

    @field_validator("query")
    @classmethod
    def _reject_blank_query(cls, value: str) -> str:
        # Field-level min_length=1 still admits whitespace-only input, which
        # strips to "" downstream and yields a "%%" LIKE pattern matching every
        # user. Normalize here and demand at least one non-space character.
        if not (cleaned := value.strip()):
            raise ValueError("query must contain at least one non-whitespace character")
        return cleaned
class UserMatch(BaseModel):
    """Minimal user projection returned by find_users.

    Intentionally narrower than UserInfo: only the fields needed to disambiguate
    matches and pass an id to created_by_fk / changed_by_fk filters. Email,
    active flag, and roles are deliberately excluded to limit identity
    exposure through this directory-resolution path.
    """

    # Every field defaults to None because the tool populates instances with
    # getattr(row, ..., None); rows missing an attribute must still validate.
    id: int | None = None
    username: str | None = None
    first_name: str | None = None
    last_name: str | None = None
class FindUsersResponse(BaseModel):
    """Response schema for find_users tool."""

    # The field descriptions double as LLM-facing guidance: they tell the
    # caller how to chain a resolved user.id into list-tool filters.
    users: List[UserMatch] = Field(
        default_factory=list,
        description=(
            "Matching users. Pass user.id as the value for created_by_fk or "
            "changed_by_fk filters on list_dashboards, list_charts, and "
            "list_datasets."
        ),
    )
    count: int = Field(..., description="Number of users returned in this response.")
    # Set by the tool when fetching page_size + 1 rows returned more than
    # page_size, i.e. the match set was trimmed.
    truncated: bool = Field(
        default=False,
        description="True when the query matched more rows than page_size allows.",
    )
class TagInfo(BaseModel):
id: int | None = None
name: str | None = None

View File

@@ -17,11 +17,13 @@
"""System tools for MCP service."""
from .find_users import find_users
from .get_instance_info import get_instance_info
from .get_schema import get_schema
from .health_check import health_check
__all__ = [
"find_users",
"health_check",
"get_instance_info",
"get_schema",

View File

@@ -0,0 +1,101 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""find_users MCP tool: resolve a person's name to user IDs for filtering."""
import logging
from fastmcp import Context
from sqlalchemy import or_
from superset_core.mcp.decorators import tool, ToolAnnotations
from superset.extensions import db, event_logger, security_manager
from superset.mcp_service.system.schemas import (
FindUsersRequest,
FindUsersResponse,
UserMatch,
)
logger = logging.getLogger(__name__)
@tool(
    tags=["core"],
    annotations=ToolAnnotations(
        title="Find users",
        readOnlyHint=True,
        destructiveHint=False,
    ),
)
async def find_users(request: FindUsersRequest, ctx: Context) -> FindUsersResponse:
    """Resolve a person's name to user IDs so they can be used as filter values.

    Use this when the caller asks "show me <person>'s dashboards/charts/datasets"
    or "what is <person> working on". Take the matching user.id and pass it as
    the value for a created_by_fk or changed_by_fk filter on list_dashboards,
    list_charts, or list_datasets.

    Matches case-insensitively against username, first_name, last_name, and
    email. The query is required and non-empty; this tool does not enumerate
    the full user directory.

    Privacy: returning a user's identity here is sanctioned only for resolving
    filter values. Do not use the response to answer "who owns X", "who can
    access X", or any access-list question — those remain off-limits per the
    server instructions.

    Args:
        request: Validated, non-blank query substring and page size.
        ctx: FastMCP request context, used for info-level progress logging.

    Returns:
        FindUsersResponse with at most page_size UserMatch rows and a
        truncated flag when more rows matched than were returned.
    """
    await ctx.info(
        "Resolving user query: query=%s, page_size=%s"
        % (request.query, request.page_size)
    )
    user_model = security_manager.user_model
    # Escape SQL LIKE metacharacters so user-supplied "%", "_" and "\" match
    # literally. Without this, query="%" passes the non-blank validator yet
    # expands to a pattern matching every row — re-enabling the full-directory
    # enumeration this tool's contract explicitly forbids. The validator has
    # already stripped surrounding whitespace from request.query.
    escaped = (
        request.query.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")
    )
    needle = f"%{escaped}%"
    with event_logger.log_context(action="mcp.find_users.query"):
        query = (
            db.session.query(user_model)
            .filter(
                or_(
                    user_model.username.ilike(needle, escape="\\"),
                    user_model.first_name.ilike(needle, escape="\\"),
                    user_model.last_name.ilike(needle, escape="\\"),
                    user_model.email.ilike(needle, escape="\\"),
                )
            )
            .order_by(user_model.username.asc())
        )
        # Fetch one extra row to detect truncation without a separate count query.
        rows = query.limit(request.page_size + 1).all()
    truncated = len(rows) > request.page_size
    rows = rows[: request.page_size]
    users: list[UserMatch] = [
        UserMatch(
            id=getattr(row, "id", None),
            username=getattr(row, "username", None),
            first_name=getattr(row, "first_name", None),
            last_name=getattr(row, "last_name", None),
        )
        for row in rows
    ]
    await ctx.info(
        "Resolved user query: matches=%s, truncated=%s" % (len(users), truncated)
    )
    return FindUsersResponse(users=users, count=len(users), truncated=truncated)

View File

@@ -0,0 +1,257 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for find_users MCP tool and its filter contract."""
import importlib
from unittest.mock import MagicMock, Mock, patch
import pytest
from fastmcp import Client
from fastmcp.exceptions import ToolError
from pydantic import ValidationError
from superset.mcp_service.app import mcp
from superset.mcp_service.system.schemas import FindUsersRequest, FindUsersResponse
from superset.utils import json
# Import the submodule directly so ``patch.object`` targets the module (not the
# ``find_users`` function that ``tool/__init__.py`` re-exports onto the
# package). The package attribute is the function, so dotted-string patches
# like ``superset.mcp_service.system.tool.find_users.db`` can resolve to the
# function in some import orderings and fail with AttributeError.
find_users_module = importlib.import_module(
"superset.mcp_service.system.tool.find_users"
)
@pytest.fixture
def mcp_server():
    """Provide the module-level FastMCP app under test."""
    return mcp
@pytest.fixture(autouse=True)
def mock_auth():
    """Mock authentication for all tests.

    autouse so every client call in this module resolves a synthetic admin
    user instead of hitting a real auth backend.
    """
    with patch("superset.mcp_service.auth.get_user_from_request") as mock_get_user:
        mock_user = Mock()
        mock_user.id = 1
        mock_user.username = "admin"
        mock_get_user.return_value = mock_user
        yield mock_get_user
def _make_user(id_, username, first=None, last=None, email=None, active=True):
"""Build a Mock user with the attributes serialize_user_object reads."""
user = Mock(
spec=["id", "username", "first_name", "last_name", "email", "active", "roles"]
)
user.id = id_
user.username = username
user.first_name = first
user.last_name = last
user.email = email
user.active = active
user.roles = []
return user
def _patch_user_query(rows):
"""Patch the SQLAlchemy chain used by find_users to return a fixed result set."""
chain = MagicMock()
chain.filter.return_value = chain
chain.order_by.return_value = chain
chain.limit.return_value = chain
chain.all.return_value = rows
session = MagicMock()
session.query.return_value = chain
return session, chain
# ---------------------------------------------------------------------------
# Schema tests
# ---------------------------------------------------------------------------
def test_find_users_request_rejects_empty_query():
    """An empty string must fail the min_length guard."""
    with pytest.raises(ValidationError):
        FindUsersRequest.model_validate({"query": ""})
def test_find_users_request_rejects_extra_fields():
    """extra="forbid" must reject unknown keys."""
    payload = {"query": "maxime", "random_field": "x"}
    with pytest.raises(ValidationError):
        FindUsersRequest.model_validate(payload)
def test_find_users_response_default_truncated_false():
    """truncated defaults to False when omitted from the payload."""
    response = FindUsersResponse(users=[], count=0)
    assert response.truncated is False
# ---------------------------------------------------------------------------
# Tool-level tests
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_find_users_returns_matches(mcp_server):
    """Happy path: one row matches; response carries the minimal projection."""
    rows = [
        _make_user(
            7, "maxime", first="Maxime", last="Beauchemin", email="m@example.com"
        )
    ]
    session, _ = _patch_user_query(rows)
    # Patch via patch.object on the imported submodule so the target is the
    # module, not the re-exported find_users function (see module docstring).
    with (
        patch.object(find_users_module, "db") as mock_db,
        patch.object(find_users_module, "security_manager") as mock_sm,
        patch.object(find_users_module, "or_") as mock_or,
    ):
        mock_db.session = session
        mock_sm.user_model = MagicMock()
        mock_or.return_value = MagicMock()
        async with Client(mcp_server) as client:
            result = await client.call_tool(
                "find_users", {"request": {"query": "maxime"}}
            )
    data = json.loads(result.content[0].text)
    assert data["count"] == 1
    assert data["truncated"] is False
    assert data["users"][0]["id"] == 7
    assert data["users"][0]["username"] == "maxime"
    assert data["users"][0]["first_name"] == "Maxime"
    assert data["users"][0]["last_name"] == "Beauchemin"
    # Privacy: minimal projection excludes identity attributes that aren't
    # required for filter resolution. Catch regressions on the response shape.
    for forbidden in ("email", "active", "roles"):
        assert forbidden not in data["users"][0]
    # or_ should have been built across the four matched columns
    assert mock_or.called
    assert len(mock_or.call_args.args) == 4
@pytest.mark.asyncio
async def test_find_users_truncates_when_more_rows_than_page_size(mcp_server):
    # page_size=2 with 3 returned rows -> truncated, response trimmed to 2
    rows = [
        _make_user(1, "a"),
        _make_user(2, "b"),
        _make_user(3, "c"),
    ]
    session, chain = _patch_user_query(rows)
    with (
        patch.object(find_users_module, "db") as mock_db,
        patch.object(find_users_module, "security_manager") as mock_sm,
        patch.object(find_users_module, "or_") as mock_or,
    ):
        mock_db.session = session
        mock_sm.user_model = MagicMock()
        mock_or.return_value = MagicMock()
        async with Client(mcp_server) as client:
            result = await client.call_tool(
                "find_users", {"request": {"query": "a", "page_size": 2}}
            )
    # Tool requested page_size+1 rows for truncation detection
    chain.limit.assert_called_with(3)
    data = json.loads(result.content[0].text)
    assert data["count"] == 2
    assert data["truncated"] is True
    assert [u["id"] for u in data["users"]] == [1, 2]
@pytest.mark.asyncio
async def test_find_users_rejects_empty_query_via_client(mcp_server):
    """Schema validation failures surface to MCP clients as ToolError."""
    payload = {"request": {"query": ""}}
    async with Client(mcp_server) as client:
        with pytest.raises(ToolError):
            await client.call_tool("find_users", payload)
@pytest.mark.parametrize("blank", [" ", " ", "\t", "\n \t"])
def test_find_users_request_rejects_whitespace_only_query(blank):
    """Whitespace-only input must be rejected by the validator.

    Such values would strip to "" and turn the LIKE pattern into "%%",
    enumerating the entire user directory.
    """
    with pytest.raises(ValidationError):
        FindUsersRequest.model_validate({"query": blank})
def test_find_users_request_strips_query_whitespace():
    """The stored query is normalized so downstream LIKE patterns carry no
    leading/trailing whitespace."""
    assert FindUsersRequest(query=" maxime ").query == "maxime"
# ---------------------------------------------------------------------------
# Filter contract: created_by_fk / changed_by_fk filtering on list tools
# ---------------------------------------------------------------------------
@patch("superset.daos.dashboard.DashboardDAO.list")
@pytest.mark.asyncio
async def test_list_dashboards_passes_created_by_fk_filter_to_dao(
    mock_list, mcp_server
):
    """list_dashboards should accept created_by_fk filter and forward it."""
    # NOTE(review): @patch sits above @pytest.mark.asyncio here while sibling
    # tests patch inside the body; this relies on mock's coroutine-aware
    # wrapping preserving the mark — confirm the ordering is intentional.
    mock_list.return_value = ([], 0)
    async with Client(mcp_server) as client:
        await client.call_tool(
            "list_dashboards",
            {
                "request": {
                    "filters": [{"col": "created_by_fk", "opr": "eq", "value": 7}],
                    "page": 1,
                    "page_size": 10,
                }
            },
        )
    assert mock_list.called
    # The DAO receives the parsed filters via the column_operators kwarg.
    forwarded_filters = mock_list.call_args.kwargs.get("column_operators")
    assert forwarded_filters is not None
    assert any(
        getattr(f, "col", None) == "created_by_fk" and getattr(f, "value", None) == 7
        for f in forwarded_filters
    )
@patch("superset.daos.chart.ChartDAO.list")
@pytest.mark.asyncio
async def test_list_charts_passes_changed_by_fk_filter_to_dao(mock_list, mcp_server):
    """list_charts should accept changed_by_fk filter and forward it."""
    mock_list.return_value = ([], 0)
    async with Client(mcp_server) as client:
        await client.call_tool(
            "list_charts",
            {
                "request": {
                    "filters": [{"col": "changed_by_fk", "opr": "eq", "value": 7}],
                    "page": 1,
                    "page_size": 10,
                }
            },
        )
    assert mock_list.called
    # The DAO receives the parsed filters via the column_operators kwarg.
    forwarded_filters = mock_list.call_args.kwargs.get("column_operators")
    assert forwarded_filters is not None
    assert any(getattr(f, "col", None) == "changed_by_fk" for f in forwarded_filters)

View File

@@ -22,6 +22,8 @@ from unittest.mock import Mock, patch
import pytest
from fastmcp import Client
from fastmcp.client.client import CallToolResult
from mcp.types import TextContent
from pydantic import ValidationError
from superset.mcp_service.app import mcp
@@ -45,6 +47,14 @@ get_schema_module = importlib.import_module(
"superset.mcp_service.system.tool.get_schema"
)
def _result_text(result: CallToolResult) -> str:
    """Return the text payload from the first content block of a tool result."""
    first, *_ = result.content
    # Narrow the union of content-block types before touching .text.
    assert isinstance(first, TextContent)
    return first.text
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@@ -198,7 +208,7 @@ async def test_get_schema_returns_structured_privacy_error_for_dataset(mcp_serve
{"request": {"model_type": "dataset"}},
)
data = json.loads(result.content[0].text)
data = json.loads(_result_text(result))
assert data["error_type"] == DATA_MODEL_METADATA_ERROR_TYPE
assert data["privacy_scope"] == "data_model"
@@ -241,7 +251,7 @@ async def test_get_schema_redacts_chart_data_model_fields(mcp_server):
{"request": {"model_type": "chart"}},
)
data = json.loads(result.content[0].text)
data = json.loads(_result_text(result))
schema_info = data["schema_info"]
assert all(
column["name"] not in CHART_DATA_MODEL_COLUMNS
@@ -389,7 +399,7 @@ class TestGetInstanceInfoCurrentUserViaMCP:
async with Client(mcp_server) as client:
result = await client.call_tool("get_instance_info", {"request": {}})
data = json.loads(result.content[0].text)
data = json.loads(_result_text(result))
assert "current_user" in data
cu = data["current_user"]
assert cu["id"] == 5
@@ -418,7 +428,7 @@ class TestGetInstanceInfoCurrentUserViaMCP:
async with Client(mcp_server) as client:
result = await client.call_tool("get_instance_info", {"request": {}})
data = json.loads(result.content[0].text)
data = json.loads(_result_text(result))
assert data["current_user"] is None
@pytest.mark.asyncio
@@ -444,7 +454,7 @@ class TestGetInstanceInfoCurrentUserViaMCP:
async with Client(mcp_server) as client:
result = await client.call_tool("get_instance_info", {"request": {}})
data = json.loads(result.content[0].text)
data = json.loads(_result_text(result))
cu = data["current_user"]
assert cu["id"] == 99
assert cu["username"] == "bot"
@@ -460,28 +470,50 @@ class TestGetInstanceInfoCurrentUserViaMCP:
# ---------------------------------------------------------------------------
# The old test_chart_filter_rejects_created_by_fk asserted that ChartFilter
# rejects created_by_fk; it directly contradicts the accept-test below now
# that ID-based people filters are allowed, so it is removed.
def test_chart_filter_rejects_user_directory_columns_other_than_fk() -> None:
    """ChartFilter still rejects user-directory columns that expose names."""
    for col in ("created_by_name", "owners", "changed_by"):
        with pytest.raises(ValidationError):
            ChartFilter.model_validate({"col": col, "opr": "eq", "value": "anything"})


def test_chart_filter_accepts_created_and_changed_by_fk() -> None:
    """ChartFilter allows filtering by created_by_fk / changed_by_fk (user IDs)."""
    for col in ("created_by_fk", "changed_by_fk"):
        f = ChartFilter.model_validate({"col": col, "opr": "eq", "value": 42})
        assert f.col == col
def test_chart_filter_rejects_invalid_column():
    """Test that ChartFilter rejects invalid column names."""
    # Only the model_validate call belongs here; the duplicated positional
    # constructor call (a leftover old diff line) is dropped.
    with pytest.raises(ValidationError):
        ChartFilter.model_validate(
            {"col": "nonexistent_column", "opr": "eq", "value": 42}
        )
# The old test_dashboard_filter_rejects_created_by_fk asserted that
# DashboardFilter rejects created_by_fk; it contradicts the accept-test below
# now that ID-based people filters are allowed, so it is removed.
def test_dashboard_filter_rejects_user_directory_columns_other_than_fk() -> None:
    """DashboardFilter still rejects user-directory columns that expose names."""
    for col in ("created_by_name", "owners", "changed_by"):
        with pytest.raises(ValidationError):
            DashboardFilter.model_validate(
                {"col": col, "opr": "eq", "value": "anything"}
            )


def test_dashboard_filter_accepts_created_and_changed_by_fk() -> None:
    """DashboardFilter allows filtering by created_by_fk / changed_by_fk."""
    for col in ("created_by_fk", "changed_by_fk"):
        f = DashboardFilter.model_validate({"col": col, "opr": "eq", "value": 42})
        assert f.col == col
def test_dashboard_filter_rejects_invalid_column():
    """Test that DashboardFilter rejects invalid column names."""
    # Only the model_validate call belongs here; the duplicated positional
    # constructor call (a leftover old diff line) is dropped.
    with pytest.raises(ValidationError):
        DashboardFilter.model_validate(
            {"col": "nonexistent_column", "opr": "eq", "value": 42}
        )
# ---------------------------------------------------------------------------
@@ -492,12 +524,12 @@ def test_dashboard_filter_rejects_invalid_column():
def test_chart_filter_existing_columns_still_work():
    """Test that pre-existing chart filter columns are not broken."""
    for col in ("slice_name", "viz_type", "datasource_name"):
        # Keep only the model_validate form; the duplicated keyword
        # constructor line was a leftover old diff line.
        f = ChartFilter.model_validate({"col": col, "opr": "eq", "value": "test"})
        assert f.col == col
def test_dashboard_filter_existing_columns_still_work():
    """Test that pre-existing dashboard filter columns are not broken."""
    for col in ("dashboard_title", "published", "favorite"):
        # Keep only the model_validate form; the duplicated keyword
        # constructor line was a leftover old diff line.
        f = DashboardFilter.model_validate({"col": col, "opr": "eq", "value": "test"})
        assert f.col == col

View File

@@ -326,11 +326,19 @@ class TestGetSchemaToolViaClient:
async def test_get_schema_omits_user_directory_columns(
self, mock_filters, mcp_server
):
"""Test that schema discovery does not advertise user/access fields."""
"""Test that schema discovery does not advertise user/access fields.
created_by_fk and changed_by_fk are intentionally allowed in
filter_columns so callers can filter by user ID resolved via find_users,
but they remain hidden from select_columns and sortable_columns so the
directory itself is never exposed.
"""
mock_filters.return_value = {
"dashboard_title": ["eq", "ilike"],
"owner": ["rel_m_m"],
"published": ["eq"],
"created_by_fk": ["eq", "in"],
"changed_by_fk": ["eq", "in"],
}
async with Client(mcp_server) as client:
@@ -352,9 +360,16 @@ class TestGetSchemaToolViaClient:
"owner",
):
assert field not in select_column_names
assert field not in info["filter_columns"]
assert field not in info["sortable_columns"]
# User-name and relationship fields stay out of filter_columns
for field in ("owners", "roles", "created_by", "changed_by", "owner"):
assert field not in info["filter_columns"]
# ID-only filter columns are advertised so callers can filter via find_users
assert "created_by_fk" in info["filter_columns"]
assert "changed_by_fk" in info["filter_columns"]
class TestGetSchemaEdgeCases:
"""Test edge cases for get_schema tool."""