diff --git a/docs/static/feature-flags.json b/docs/static/feature-flags.json index 5d7a86994f1..5e66c2e9a7f 100644 --- a/docs/static/feature-flags.json +++ b/docs/static/feature-flags.json @@ -75,6 +75,12 @@ "lifecycle": "development", "description": "Expand nested types in Presto into extra columns/arrays. Experimental, doesn't work with all nested types." }, + { + "name": "SEMANTIC_LAYERS", + "default": false, + "lifecycle": "development", + "description": "Enable semantic layers and show semantic views alongside datasets" + }, { "name": "TABLE_V2_TIME_COMPARISON_ENABLED", "default": false, diff --git a/pyproject.toml b/pyproject.toml index 70feeeb4d3e..429e90ac5b0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -285,6 +285,7 @@ module = [ "superset.tags.filters", "superset.commands.security.update", "superset.commands.security.create", + "superset.datasource.api", ] warn_unused_ignores = false diff --git a/superset-frontend/packages/superset-ui-core/src/components/Label/reusable/DatasetTypeLabel.tsx b/superset-frontend/packages/superset-ui-core/src/components/Label/reusable/DatasetTypeLabel.tsx index 48784d937a3..5fb77b9c198 100644 --- a/superset-frontend/packages/superset-ui-core/src/components/Label/reusable/DatasetTypeLabel.tsx +++ b/superset-frontend/packages/superset-ui-core/src/components/Label/reusable/DatasetTypeLabel.tsx @@ -23,7 +23,7 @@ import { Label } from '..'; // Define the prop types for DatasetTypeLabel interface DatasetTypeLabelProps { - datasetType: 'physical' | 'virtual'; // Accepts only 'physical' or 'virtual' + datasetType: 'physical' | 'virtual' | 'semantic_view'; } const SIZE = 's'; // Define the size as a constant @@ -32,6 +32,24 @@ export const DatasetTypeLabel: React.FC = ({ datasetType, }) => { const theme = useTheme(); + + if (datasetType === 'semantic_view') { + return ( + + ); + } + const label: string = datasetType === 'physical' ? 
t('Physical') : t('Virtual'); const icon = diff --git a/superset-frontend/packages/superset-ui-core/src/utils/featureFlags.ts b/superset-frontend/packages/superset-ui-core/src/utils/featureFlags.ts index 18bc85af00c..382d83a1737 100644 --- a/superset-frontend/packages/superset-ui-core/src/utils/featureFlags.ts +++ b/superset-frontend/packages/superset-ui-core/src/utils/featureFlags.ts @@ -60,6 +60,7 @@ export enum FeatureFlag { ListviewsDefaultCardView = 'LISTVIEWS_DEFAULT_CARD_VIEW', Matrixify = 'MATRIXIFY', ScheduledQueries = 'SCHEDULED_QUERIES', + SemanticLayers = 'SEMANTIC_LAYERS', SqllabBackendPersistence = 'SQLLAB_BACKEND_PERSISTENCE', SqlValidatorsByEngine = 'SQL_VALIDATORS_BY_ENGINE', SshTunneling = 'SSH_TUNNELING', diff --git a/superset-frontend/src/features/semanticViews/SemanticViewEditModal.tsx b/superset-frontend/src/features/semanticViews/SemanticViewEditModal.tsx new file mode 100644 index 00000000000..ed90d165b9c --- /dev/null +++ b/superset-frontend/src/features/semanticViews/SemanticViewEditModal.tsx @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +import { useState, useEffect } from 'react'; +import { t } from '@apache-superset/core'; +import { styled } from '@apache-superset/core/ui'; +import { SupersetClient } from '@superset-ui/core'; +import { Input, InputNumber } from '@superset-ui/core/components'; +import { Icons } from '@superset-ui/core/components/Icons'; +import { + StandardModal, + ModalFormField, + MODAL_STANDARD_WIDTH, +} from 'src/components/Modal'; + +const ModalContent = styled.div` + padding: ${({ theme }) => theme.sizeUnit * 4}px; +`; + +interface SemanticViewEditModalProps { + show: boolean; + onHide: () => void; + onSave: () => void; + addDangerToast: (msg: string) => void; + addSuccessToast: (msg: string) => void; + semanticView: { + id: number; + table_name: string; + description?: string | null; + cache_timeout?: number | null; + } | null; +} + +export default function SemanticViewEditModal({ + show, + onHide, + onSave, + addDangerToast, + addSuccessToast, + semanticView, +}: SemanticViewEditModalProps) { + const [description, setDescription] = useState(''); + const [cacheTimeout, setCacheTimeout] = useState(null); + const [saving, setSaving] = useState(false); + + useEffect(() => { + if (semanticView) { + setDescription(semanticView.description || ''); + setCacheTimeout(semanticView.cache_timeout ?? 
null); + } + }, [semanticView]); + + const handleSave = async () => { + if (!semanticView) return; + setSaving(true); + try { + await SupersetClient.put({ + endpoint: `/api/v1/semantic_view/${semanticView.id}`, + jsonPayload: { + description: description || null, + cache_timeout: cacheTimeout, + }, + }); + addSuccessToast(t('Semantic view updated')); + onSave(); + onHide(); + } catch { + addDangerToast(t('An error occurred while saving the semantic view')); + } finally { + setSaving(false); + } + }; + + return ( + } + isEditMode + width={MODAL_STANDARD_WIDTH} + saveLoading={saving} + > + + + setDescription(e.target.value)} + rows={4} + /> + + + setCacheTimeout(value as number | null)} + min={0} + placeholder={t('Duration in seconds')} + style={{ width: '100%' }} + /> + + + + ); +} diff --git a/superset-frontend/src/pages/DatasetList/DatasetList.behavior.test.tsx b/superset-frontend/src/pages/DatasetList/DatasetList.behavior.test.tsx index 91476586826..160be1b685e 100644 --- a/superset-frontend/src/pages/DatasetList/DatasetList.behavior.test.tsx +++ b/superset-frontend/src/pages/DatasetList/DatasetList.behavior.test.tsx @@ -31,6 +31,7 @@ import { mockRelatedCharts, mockRelatedDashboards, mockHandleResourceExport, + mockDatasetListEndpoints, API_ENDPOINTS, } from './DatasetList.testHelpers'; @@ -98,7 +99,7 @@ test('typing in search triggers debounced API call with search filter', async () // Record initial API calls const initialCallCount = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; // Type search query and submit with Enter to trigger the debounced fetch @@ -107,14 +108,16 @@ test('typing in search triggers debounced API call with search filter', async () // Wait for debounced API call await waitFor( () => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); 
expect(calls.length).toBeGreaterThan(initialCallCount); }, { timeout: 5000 }, ); // Verify the latest API call includes search filter in URL - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASOURCE_COMBINED); const latestCall = calls[calls.length - 1]; const { url } = latestCall; @@ -136,8 +139,7 @@ test('typing in search triggers debounced API call with search filter', async () test('500 error triggers danger toast with error message', async () => { const addDangerToast = jest.fn(); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ status: 500, body: { message: 'Internal Server Error' }, }); @@ -173,8 +175,7 @@ test('500 error triggers danger toast with error message', async () => { test('network timeout triggers danger toast', async () => { const addDangerToast = jest.fn(); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ throws: new Error('Network timeout'), }); @@ -213,8 +214,7 @@ test('clicking delete opens modal with related objects count', async () => { // Set up delete mocks setupDeleteMocks(datasetToDelete.id); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetToDelete], count: 1, }); @@ -254,8 +254,7 @@ test('clicking delete opens modal with related objects count', async () => { test('clicking export calls handleResourceExport with dataset ID', async () => { const datasetToExport = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetToExport], count: 1, }); @@ -288,8 +287,7 @@ test('clicking duplicate opens modal and submits duplicate request', async () => kind: 'virtual', }; - 
fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetToDuplicate], count: 1, }); @@ -312,7 +310,7 @@ test('clicking duplicate opens modal and submits duplicate request', async () => // Track initial dataset list API calls BEFORE duplicate action const initialDatasetCallCount = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; const row = screen.getByText(datasetToDuplicate.table_name).closest('tr'); @@ -355,7 +353,9 @@ test('clicking duplicate opens modal and submits duplicate request', async () => // Verify refreshData() is called (observable via new dataset list API call) await waitFor( () => { - const datasetCalls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const datasetCalls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(datasetCalls.length).toBeGreaterThan(initialDatasetCallCount); }, { timeout: 3000 }, @@ -376,8 +376,7 @@ test('certified dataset shows badge and tooltip with certification details', asy }), }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [certifiedDataset], count: 1, }); @@ -417,8 +416,7 @@ test('dataset with warning shows icon and tooltip with markdown content', async }), }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetWithWarning], count: 1, }); @@ -452,8 +450,7 @@ test('dataset with warning shows icon and tooltip with markdown content', async test('dataset name links to Explore with correct URL and accessible label', async () => { const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 
}); renderDatasetList(mockAdminUser); diff --git a/superset-frontend/src/pages/DatasetList/DatasetList.integration.test.tsx b/superset-frontend/src/pages/DatasetList/DatasetList.integration.test.tsx index ba28f555e63..2a28b7bb11c 100644 --- a/superset-frontend/src/pages/DatasetList/DatasetList.integration.test.tsx +++ b/superset-frontend/src/pages/DatasetList/DatasetList.integration.test.tsx @@ -27,6 +27,7 @@ import { mockAdminUser, mockDatasets, setupBulkDeleteMocks, + mockDatasetListEndpoints, API_ENDPOINTS, } from './DatasetList.testHelpers'; @@ -72,8 +73,7 @@ test('ListView provider correctly merges filter + sort + pagination state on ref // the ListView provider correctly merges them for the API call. // Component tests verify individual pieces persist; this verifies they COMBINE correctly. - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: mockDatasets, count: mockDatasets.length, }); @@ -91,31 +91,33 @@ test('ListView provider correctly merges filter + sort + pagination state on ref }); const callsBeforeSort = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; await userEvent.click(nameHeader); // Wait for sort-triggered refetch to complete before applying filter await waitFor(() => { expect( - fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS).length, + fetchMock.callHistory.calls(API_ENDPOINTS.DATASOURCE_COMBINED).length, ).toBeGreaterThan(callsBeforeSort); }); // 2. 
Apply a filter using selectOption helper const beforeFilterCallCount = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; await selectOption('Virtual', 'Type'); // Wait for filter API call to complete await waitFor(() => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(beforeFilterCallCount); }); // 3. Verify the final API call contains ALL three state pieces merged correctly - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASOURCE_COMBINED); const latestCall = calls[calls.length - 1]; const { url } = latestCall; @@ -151,8 +153,7 @@ test('bulk action orchestration: selection → action → cleanup cycle works co setupBulkDeleteMocks(); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: mockDatasets, count: mockDatasets.length, }); @@ -218,7 +219,7 @@ test('bulk action orchestration: selection → action → cleanup cycle works co // Capture datasets call count before confirming const datasetsCallCountBeforeDelete = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; const confirmButton = within(modal) @@ -242,7 +243,7 @@ test('bulk action orchestration: selection → action → cleanup cycle works co // Wait for datasets refetch after delete await waitFor(() => { const datasetsCallCount = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; expect(datasetsCallCount).toBeGreaterThan(datasetsCallCountBeforeDelete); }); diff --git a/superset-frontend/src/pages/DatasetList/DatasetList.listview.test.tsx b/superset-frontend/src/pages/DatasetList/DatasetList.listview.test.tsx index 59a1fd1075f..37b611ac5d7 100644 --- 
a/superset-frontend/src/pages/DatasetList/DatasetList.listview.test.tsx +++ b/superset-frontend/src/pages/DatasetList/DatasetList.listview.test.tsx @@ -33,6 +33,7 @@ import { mockHandleResourceExport, assertOnlyExpectedCalls, API_ENDPOINTS, + mockDatasetListEndpoints, getDeleteRouteName, } from './DatasetList.testHelpers'; @@ -113,8 +114,7 @@ const setupErrorTestScenario = ({ }); // Configure fetchMock to return single dataset - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); // Render component with toast mocks renderDatasetList(mockAdminUser, { @@ -157,7 +157,7 @@ test('required API endpoints are called and no unmocked calls on initial render' // assertOnlyExpectedCalls checks: 1) no unmatched calls, 2) each expected endpoint was called assertOnlyExpectedCalls([ API_ENDPOINTS.DATASETS_INFO, // Permission check - API_ENDPOINTS.DATASETS, // Main dataset list data + API_ENDPOINTS.DATASOURCE_COMBINED, // Main dataset list data ]); }); @@ -197,8 +197,7 @@ test('renders all required column headers', async () => { test('displays dataset name in Name column', async () => { const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -211,8 +210,7 @@ test('displays dataset type as Physical or Virtual', async () => { const physicalDataset = mockDatasets[0]; // kind: 'physical' const virtualDataset = mockDatasets[1]; // kind: 'virtual' - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [physicalDataset, virtualDataset], count: 2, }); @@ -229,8 +227,7 @@ test('displays dataset type as Physical or Virtual', async () => { test('displays 
database name in Database column', async () => { const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -244,8 +241,7 @@ test('displays database name in Database column', async () => { test('displays schema name in Schema column', async () => { const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -257,8 +253,7 @@ test('displays schema name in Schema column', async () => { test('displays last modified date in humanized format', async () => { const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -283,7 +278,7 @@ test('sorting by Name column updates API call with sort parameter', async () => // Record initial calls const initialCalls = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; // Click Name header to sort @@ -291,12 +286,14 @@ test('sorting by Name column updates API call with sort parameter', async () => // Wait for new API call await waitFor(() => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(initialCalls); }); // Verify latest call includes sort parameter - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASOURCE_COMBINED); const latestCall = 
calls[calls.length - 1]; const { url } = latestCall; @@ -317,17 +314,19 @@ test('sorting by Database column updates sort parameter', async () => { }); const initialCalls = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; await userEvent.click(databaseHeader); await waitFor(() => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(initialCalls); }); - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASOURCE_COMBINED); const { url } = calls[calls.length - 1]; expect(url).toMatch(/order_column|sort/); }); @@ -345,17 +344,19 @@ test('sorting by Last modified column updates sort parameter', async () => { }); const initialCalls = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; await userEvent.click(modifiedHeader); await waitFor(() => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(initialCalls); }); - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASOURCE_COMBINED); const { url } = calls[calls.length - 1]; expect(url).toMatch(/order_column|sort/); }); @@ -363,8 +364,7 @@ test('sorting by Last modified column updates sort parameter', async () => { test('export button triggers handleResourceExport with dataset ID', async () => { const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -392,8 +392,7 @@ test('delete 
button opens modal with dataset details', async () => { setupDeleteMocks(dataset.id); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -415,8 +414,7 @@ test('delete action successfully deletes dataset and refreshes list', async () = const datasetToDelete = mockDatasets[0]; setupDeleteMocks(datasetToDelete.id); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetToDelete], count: 1, }); @@ -442,7 +440,7 @@ test('delete action successfully deletes dataset and refreshes list', async () = // Track API calls before confirm const callsBefore = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; // Click confirm - find the danger button (last delete button in modal) @@ -468,7 +466,7 @@ test('delete action successfully deletes dataset and refreshes list', async () = // List refreshes await waitFor(() => { expect( - fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS).length, + fetchMock.callHistory.calls(API_ENDPOINTS.DATASOURCE_COMBINED).length, ).toBeGreaterThan(callsBefore); }); }); @@ -477,8 +475,7 @@ test('delete action cancel closes modal without deleting', async () => { const dataset = mockDatasets[0]; setupDeleteMocks(dataset.id); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -518,8 +515,7 @@ test('duplicate action successfully duplicates virtual dataset', async () => { const virtualDataset = mockDatasets[1]; // Virtual dataset (kind: 'virtual') setupDuplicateMocks(); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - 
fetchMock.get(API_ENDPOINTS.DATASETS, { result: [virtualDataset], count: 1 }); + mockDatasetListEndpoints({ result: [virtualDataset], count: 1 }); renderDatasetList(mockAdminUser, { addSuccessToast: mockAddSuccessToast, @@ -542,7 +538,7 @@ test('duplicate action successfully duplicates virtual dataset', async () => { // Track API calls before submit const callsBefore = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; // Submit @@ -564,7 +560,7 @@ test('duplicate action successfully duplicates virtual dataset', async () => { // List refreshes await waitFor(() => { expect( - fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS).length, + fetchMock.callHistory.calls(API_ENDPOINTS.DATASOURCE_COMBINED).length, ).toBeGreaterThan(callsBefore); }); }); @@ -573,8 +569,7 @@ test('duplicate button visible only for virtual datasets', async () => { const physicalDataset = mockDatasets[0]; // kind: 'physical' const virtualDataset = mockDatasets[1]; // kind: 'virtual' - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [physicalDataset, virtualDataset], count: 2, }); @@ -633,8 +628,7 @@ test('bulk select enables checkboxes', async () => { }, 30000); test('selecting all datasets shows correct count in toolbar', async () => { - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: mockDatasets, count: mockDatasets.length, }); @@ -673,8 +667,7 @@ test('selecting all datasets shows correct count in toolbar', async () => { }, 30000); test('bulk export triggers export with selected IDs', async () => { - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [mockDatasets[0]], count: 1, }); @@ -716,8 +709,7 @@ test('bulk export triggers export with selected IDs', async () => { 
test('bulk delete opens confirmation modal', async () => { setupBulkDeleteMocks(); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [mockDatasets[0]], count: 1, }); @@ -823,8 +815,7 @@ test('certified badge appears for certified datasets', async () => { }), }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [certifiedDataset], count: 1, }); @@ -854,8 +845,7 @@ test('warning icon appears for datasets with warnings', async () => { }), }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetWithWarning], count: 1, }); @@ -883,8 +873,7 @@ test('info tooltip appears for datasets with descriptions', async () => { description: 'Sales data from Q4 2024', }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetWithDescription], count: 1, }); @@ -909,8 +898,7 @@ test('info tooltip appears for datasets with descriptions', async () => { test('dataset name links to Explore page', async () => { const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -930,8 +918,7 @@ test('dataset name links to Explore page', async () => { test('physical dataset shows delete, export, and edit actions (no duplicate)', async () => { const physicalDataset = mockDatasets[0]; // kind: 'physical' - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [physicalDataset], count: 1, }); @@ -962,8 +949,7 @@ test('physical dataset shows 
delete, export, and edit actions (no duplicate)', a test('virtual dataset shows delete, export, edit, and duplicate actions', async () => { const virtualDataset = mockDatasets[1]; // kind: 'virtual' - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [virtualDataset], count: 1 }); + mockDatasetListEndpoints({ result: [virtualDataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -992,8 +978,7 @@ test('edit action is enabled for dataset owner', async () => { owners: [{ id: mockAdminUser.userId, username: 'admin' }], }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -1016,8 +1001,7 @@ test('edit action is disabled for non-owner', async () => { owners: [{ id: 999, username: 'other_user' }], // Different user }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); // Use a non-admin user to test ownership check const regularUser = { @@ -1046,8 +1030,7 @@ test('all action buttons are clickable and enabled for admin user', async () => owners: [{ id: mockAdminUser.userId, username: 'admin' }], }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [virtualDataset], count: 1 }); + mockDatasetListEndpoints({ result: [virtualDataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -1082,8 +1065,7 @@ test('all action buttons are clickable and enabled for admin user', async () => }); test('displays error when initial dataset fetch fails with 500', async () => { - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ status: 500, body: { message: 
'Internal Server Error' }, }); @@ -1104,8 +1086,7 @@ test('displays error when initial dataset fetch fails with 500', async () => { }); test('displays error when initial dataset fetch fails with 403 permission denied', async () => { - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ status: 403, body: { message: 'Access Denied' }, }); @@ -1119,9 +1100,9 @@ test('displays error when initial dataset fetch fails with 403 permission denied expect(mockAddDangerToast).toHaveBeenCalled(); }); - // Verify toast message contains the 403-specific "Access Denied" text + // Verify toast message contains the generic error text const toastMessage = String(mockAddDangerToast.mock.calls[0][0]); - expect(toastMessage).toContain('Access Denied'); + expect(toastMessage).toContain('An error occurred while fetching datasets'); // No dataset names from mockDatasets should appear in the document mockDatasets.forEach(dataset => { @@ -1373,7 +1354,7 @@ test('sort order persists after deleting a dataset', async () => { // Record initial API calls count const initialCalls = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; // Click Name header to sort @@ -1381,12 +1362,16 @@ test('sort order persists after deleting a dataset', async () => { // Wait for new API call with sort parameter await waitFor(() => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(initialCalls); }); // Record the sort parameter from the API call after sorting - const callsAfterSort = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const callsAfterSort = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); const sortedUrl = callsAfterSort[callsAfterSort.length - 1].url; expect(sortedUrl).toMatch(/order_column|sort/); @@ 
-1406,7 +1391,7 @@ test('sort order persists after deleting a dataset', async () => { // Record call count before delete to track refetch const callsBeforeDelete = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; const confirmButton = within(modal) @@ -1427,7 +1412,7 @@ test('sort order persists after deleting a dataset', async () => { // Wait for list refetch to complete (prevents async cleanup error) await waitFor(() => { const currentCalls = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; expect(currentCalls).toBeGreaterThan(callsBeforeDelete); }); @@ -1452,8 +1437,7 @@ test('sort order persists after deleting a dataset', async () => { // test. Component tests here focus on individual behaviors. test('bulk selection clears when filter changes', async () => { - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: mockDatasets, count: mockDatasets.length, }); @@ -1505,7 +1489,7 @@ test('bulk selection clears when filter changes', async () => { // Record API call count before filter const beforeFilterCallCount = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; // Wait for filter combobox to be ready before applying filter @@ -1516,13 +1500,15 @@ test('bulk selection clears when filter changes', async () => { // Wait for filter API call to complete await waitFor(() => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(beforeFilterCallCount); }); // Verify filter was applied by decoding URL payload const urlAfterFilter = fetchMock.callHistory - .calls(API_ENDPOINTS.DATASETS) + .calls(API_ENDPOINTS.DATASOURCE_COMBINED) .at(-1)?.url; const risonAfterFilter = new URL( urlAfterFilter!, 
@@ -1557,7 +1543,7 @@ test('type filter API call includes correct filter parameter', async () => { // Snapshot call count before filter const callsBeforeFilter = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; // Apply Type filter @@ -1565,12 +1551,16 @@ test('type filter API call includes correct filter parameter', async () => { // Wait for filter API call to complete await waitFor(() => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(callsBeforeFilter); }); // Verify the latest API call includes the Type filter - const url = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS).at(-1)?.url; + const url = fetchMock.callHistory + .calls(API_ENDPOINTS.DATASOURCE_COMBINED) + .at(-1)?.url; expect(url).toContain('filters'); // searchParams.get() already URL-decodes, so pass directly to rison.decode @@ -1603,7 +1593,7 @@ test('type filter persists after duplicating a dataset', async () => { // Snapshot call count before filter const callsBeforeFilter = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; // Apply Type filter @@ -1611,13 +1601,15 @@ test('type filter persists after duplicating a dataset', async () => { // Wait for filter API call to complete await waitFor(() => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(callsBeforeFilter); }); // Verify filter is present by checking the latest API call const urlAfterFilter = fetchMock.callHistory - .calls(API_ENDPOINTS.DATASETS) + .calls(API_ENDPOINTS.DATASOURCE_COMBINED) .at(-1)?.url; const risonAfterFilter = new URL( urlAfterFilter!, @@ -1637,7 +1629,7 @@ test('type filter persists after duplicating a dataset', async () => { 
// Capture datasets API call count BEFORE any duplicate operations const datasetsCallCountBeforeDuplicate = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; // Now duplicate the dataset @@ -1673,14 +1665,14 @@ test('type filter persists after duplicating a dataset', async () => { // Wait for datasets refetch to occur (proves duplicate triggered a refresh) await waitFor(() => { const datasetsCallCount = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; expect(datasetsCallCount).toBeGreaterThan(datasetsCallCountBeforeDuplicate); }); // Verify Type filter persisted in the NEW datasets API call after duplication const urlAfterDuplicate = fetchMock.callHistory - .calls(API_ENDPOINTS.DATASETS) + .calls(API_ENDPOINTS.DATASOURCE_COMBINED) .at(-1)?.url; const risonAfterDuplicate = new URL( urlAfterDuplicate!, @@ -1715,8 +1707,7 @@ test('edit action shows error toast when dataset fetch fails', async () => { ], }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [ownedDataset], count: 1 }); + mockDatasetListEndpoints({ result: [ownedDataset], count: 1 }); // Mock SupersetClient.get to fail for the specific dataset endpoint jest.spyOn(SupersetClient, 'get').mockImplementation(async request => { @@ -1759,8 +1750,7 @@ test('bulk export error shows toast and clears loading state', async () => { // Mock handleResourceExport to throw an error mockHandleResourceExport.mockRejectedValueOnce(new Error('Export failed')); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [mockDatasets[0]], count: 1, }); @@ -1824,8 +1814,7 @@ test('bulk delete error shows toast without refreshing list', async () => { body: { message: 'Bulk delete failed' }, }); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - 
fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [mockDatasets[0]], count: 1, }); @@ -1901,8 +1890,7 @@ test('bulk select shows "N Selected (Virtual)" for virtual-only selection', asyn // Use only virtual datasets const virtualDatasets = mockDatasets.filter(d => d.kind === 'virtual'); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: virtualDatasets, count: virtualDatasets.length, }); @@ -1948,8 +1936,7 @@ test('bulk select shows "N Selected (Physical)" for physical-only selection', as // Use only physical datasets const physicalDatasets = mockDatasets.filter(d => d.kind === 'physical'); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: physicalDatasets, count: physicalDatasets.length, }); @@ -1999,8 +1986,7 @@ test('bulk select shows mixed count for virtual and physical selection', async ( mockDatasets.find(d => d.kind === 'virtual')!, ]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: mixedDatasets, count: mixedDatasets.length, }); @@ -2063,8 +2049,7 @@ test('delete modal shows affected dashboards with overflow for >10 items', async title: `Dashboard ${i + 1}`, })); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); fetchMock.get(`glob:*/api/v1/dataset/${dataset.id}/related_objects*`, { charts: { count: 0, result: [] }, @@ -2101,8 +2086,7 @@ test('delete modal shows affected dashboards with overflow for >10 items', async test('delete modal hides affected dashboards section when count is zero', async () => { const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - 
fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); fetchMock.get(`glob:*/api/v1/dataset/${dataset.id}/related_objects*`, { charts: { count: 2, result: [{ id: 1, slice_name: 'Chart 1' }] }, @@ -2140,8 +2124,7 @@ test('delete modal shows affected charts with overflow for >10 items', async () slice_name: `Chart ${i + 1}`, })); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); fetchMock.get(`glob:*/api/v1/dataset/${dataset.id}/related_objects*`, { charts: { count: 12, result: manyCharts }, diff --git a/superset-frontend/src/pages/DatasetList/DatasetList.permissions.test.tsx b/superset-frontend/src/pages/DatasetList/DatasetList.permissions.test.tsx index d69594fbdc0..5ac1114ca27 100644 --- a/superset-frontend/src/pages/DatasetList/DatasetList.permissions.test.tsx +++ b/superset-frontend/src/pages/DatasetList/DatasetList.permissions.test.tsx @@ -27,7 +27,7 @@ import { mockWriteUser, mockExportOnlyUser, mockDatasets, - API_ENDPOINTS, + mockDatasetListEndpoints, } from './DatasetList.testHelpers'; // Increase default timeout for tests that involve multiple async operations @@ -238,8 +238,7 @@ test('action buttons respect user permissions', async () => { const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); @@ -265,8 +264,7 @@ test('read-only user sees no delete or duplicate buttons in row', async () => { const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); 
renderDatasetList(mockReadOnlyUser); @@ -301,8 +299,7 @@ test('write user sees edit, delete, and export actions', async () => { owners: [{ id: mockWriteUser.userId, username: 'writeuser' }], }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockWriteUser); @@ -337,8 +334,7 @@ test('export-only user has no Actions column (no write/duplicate permissions)', const dataset = mockDatasets[0]; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockExportOnlyUser); @@ -371,8 +367,7 @@ test('user with can_duplicate sees duplicate button only for virtual datasets', const physicalDataset = mockDatasets[0]; // kind: 'physical' const virtualDataset = mockDatasets[1]; // kind: 'virtual' - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [physicalDataset, virtualDataset], count: 2, }); diff --git a/superset-frontend/src/pages/DatasetList/DatasetList.test.tsx b/superset-frontend/src/pages/DatasetList/DatasetList.test.tsx index 0e7ec845574..c17f2c994eb 100644 --- a/superset-frontend/src/pages/DatasetList/DatasetList.test.tsx +++ b/superset-frontend/src/pages/DatasetList/DatasetList.test.tsx @@ -29,6 +29,7 @@ import { mockExportOnlyUser, mockDatasets, mockApiError403, + mockDatasetListEndpoints, API_ENDPOINTS, RisonFilter, } from './DatasetList.testHelpers'; @@ -68,13 +69,17 @@ test('shows loading state during initial data fetch', () => { // Use fake timers to avoid leaving real timers running after test jest.useFakeTimers(); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get( - API_ENDPOINTS.DATASETS, - new Promise(resolve => - 
setTimeout(() => resolve({ result: [], count: 0 }), 10000), - ), + const delayedResponse = new Promise(resolve => + setTimeout(() => resolve({ result: [], count: 0 }), 10000), ); + fetchMock.removeRoutes({ + names: [ + API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, + ], + }); + fetchMock.get(API_ENDPOINTS.DATASETS, delayedResponse); + fetchMock.get(API_ENDPOINTS.DATASOURCE_COMBINED, delayedResponse); renderDatasetList(mockAdminUser); @@ -87,13 +92,17 @@ test('maintains component structure during loading', () => { // Use fake timers to avoid leaving real timers running after test jest.useFakeTimers(); - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get( - API_ENDPOINTS.DATASETS, - new Promise(resolve => - setTimeout(() => resolve({ result: [], count: 0 }), 10000), + const delayedResponse = new Promise(resolve => + setTimeout(() => resolve({ result: [], count: 0 }), 10000), ); + fetchMock.removeRoutes({ + names: [ + API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, + ], + }); + fetchMock.get(API_ENDPOINTS.DATASETS, delayedResponse); + fetchMock.get(API_ENDPOINTS.DATASOURCE_COMBINED, delayedResponse); renderDatasetList(mockAdminUser); @@ -214,8 +223,7 @@ test('handles datasets with missing fields and renders gracefully', async () => sql: null, }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetWithMissingFields], count: 1, }); @@ -241,8 +249,7 @@ test('handles datasets with missing fields and renders gracefully', async () => }); test('handles empty results (shows empty state)', async () => { - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [], count: 0 }); + mockDatasetListEndpoints({ result: [], count: 0 }); renderDatasetList(mockAdminUser); @@ -254,7 +261,9 @@ test('makes correct initial API call on
load', async () => { renderDatasetList(mockAdminUser); await waitFor(() => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(0); }); }); @@ -263,7 +272,9 @@ test('API call includes correct page size', async () => { renderDatasetList(mockAdminUser); await waitFor(() => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(0); const { url } = calls[0]; expect(url).toContain('page_size'); @@ -278,7 +289,7 @@ test('typing in name filter updates input value and triggers API with decoded se // Record initial API calls const initialCallCount = fetchMock.callHistory.calls( - API_ENDPOINTS.DATASETS, + API_ENDPOINTS.DATASOURCE_COMBINED, ).length; // Type in search box and press Enter to trigger search @@ -292,7 +303,9 @@ test('typing in name filter updates input value and triggers API with decoded se // Wait for API call after Enter key press await waitFor( () => { - const calls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const calls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); expect(calls.length).toBeGreaterThan(initialCallCount); // Get latest API call @@ -346,8 +359,7 @@ test('toggling bulk select mode shows checkboxes', async () => { }, 30000); test('handles 500 error on initial load without crashing', async () => { - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ throws: new Error('Internal Server Error'), }); @@ -385,8 +397,7 @@ test('handles 403 error on _info endpoint and disables create actions', async () }); test('handles network timeout without crashing', async () => { - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - 
fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ throws: new Error('Network timeout'), }); @@ -414,7 +425,9 @@ test('component requires explicit mocks for all API endpoints', async () => { await waitForDatasetsPageReady(); // Verify that critical endpoints were called and had mocks available - const newDatasetsCalls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS); + const newDatasetsCalls = fetchMock.callHistory.calls( + API_ENDPOINTS.DATASOURCE_COMBINED, + ); const newInfoCalls = fetchMock.callHistory.calls(API_ENDPOINTS.DATASETS_INFO); // These should have been called during render @@ -446,8 +459,7 @@ test('renders datasets with certification data', async () => { }), }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [certifiedDataset], count: 1, }); @@ -474,8 +486,7 @@ test('displays datasets with warning_markdown', async () => { }), }; - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetWithWarning], count: 1, }); @@ -496,8 +507,7 @@ test('displays datasets with warning_markdown', async () => { test('displays dataset with multiple owners', async () => { const datasetWithOwners = mockDatasets[1]; // Has 2 owners: Jane Smith, Bob Jones - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetWithOwners], count: 1, }); @@ -518,8 +528,7 @@ test('displays dataset with multiple owners', async () => { test('displays ModifiedInfo with humanized date', async () => { const datasetWithModified = mockDatasets[0]; // changed_by_name: 'John Doe', changed_on: '1 day ago' - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { + mockDatasetListEndpoints({ result: [datasetWithModified], count: 1, }); @@ -541,8 +550,7 @@ 
test('displays ModifiedInfo with humanized date', async () => { test('dataset name links to Explore with correct explore_url', async () => { const dataset = mockDatasets[0]; // explore_url: '/explore/?datasource=1__table' - fetchMock.removeRoutes({ names: [API_ENDPOINTS.DATASETS] }); - fetchMock.get(API_ENDPOINTS.DATASETS, { result: [dataset], count: 1 }); + mockDatasetListEndpoints({ result: [dataset], count: 1 }); renderDatasetList(mockAdminUser); diff --git a/superset-frontend/src/pages/DatasetList/DatasetList.testHelpers.tsx b/superset-frontend/src/pages/DatasetList/DatasetList.testHelpers.tsx index 01ee8a9cf29..30b51bbe6ed 100644 --- a/superset-frontend/src/pages/DatasetList/DatasetList.testHelpers.tsx +++ b/superset-frontend/src/pages/DatasetList/DatasetList.testHelpers.tsx @@ -318,6 +318,7 @@ export const mockApiError404 = { export const API_ENDPOINTS = { DATASETS_INFO: 'glob:*/api/v1/dataset/_info*', DATASETS: 'glob:*/api/v1/dataset/?*', + DATASOURCE_COMBINED: 'glob:*/api/v1/datasource/?*', DATASET_GET: 'glob:*/api/v1/dataset/[0-9]*', DATASET_RELATED_OBJECTS: 'glob:*/api/v1/dataset/*/related_objects*', DATASET_DELETE: 'glob:*/api/v1/dataset/[0-9]*', @@ -499,6 +500,24 @@ export const assertOnlyExpectedCalls = (expectedEndpoints: string[]) => { }); }; +/** + * Helper to mock the dataset list endpoints. + * The component fetches from /api/v1/datasource/ (combined endpoint). + * Some tests also need the legacy /api/v1/dataset/ endpoint for + * other operations (delete, bulk delete) that still use it. 
+ */ +export const mockDatasetListEndpoints = (response: Record<string, unknown>) => { + fetchMock.removeRoutes({ + names: [API_ENDPOINTS.DATASETS, API_ENDPOINTS.DATASOURCE_COMBINED], + }); + fetchMock.get(API_ENDPOINTS.DATASETS, response, { + name: API_ENDPOINTS.DATASETS, + }); + fetchMock.get(API_ENDPOINTS.DATASOURCE_COMBINED, response, { + name: API_ENDPOINTS.DATASOURCE_COMBINED, + }); +}; + // MSW setup using fetch-mock (following ChartList pattern) // Routes are named using the API_ENDPOINTS constant values so they can be // removed by name using removeRoutes({ names: [API_ENDPOINTS.X] }) @@ -511,11 +530,10 @@ export const setupMocks = () => { { name: API_ENDPOINTS.DATASETS_INFO }, ); - fetchMock.get( - API_ENDPOINTS.DATASETS, - { result: mockDatasets, count: mockDatasets.length }, - { name: API_ENDPOINTS.DATASETS }, - ); + mockDatasetListEndpoints({ + result: mockDatasets, + count: mockDatasets.length, + }); fetchMock.get( API_ENDPOINTS.DATASET_FAVORITE_STATUS, diff --git a/superset-frontend/src/pages/DatasetList/index.tsx b/superset-frontend/src/pages/DatasetList/index.tsx index a71bdece01d..52bb766c0e0 100644 --- a/superset-frontend/src/pages/DatasetList/index.tsx +++ b/superset-frontend/src/pages/DatasetList/index.tsx @@ -17,7 +17,12 @@ * under the License.
*/ import { t } from '@apache-superset/core/translation'; -import { getExtensionsRegistry, SupersetClient } from '@superset-ui/core'; +import { + getExtensionsRegistry, + SupersetClient, + isFeatureEnabled, + FeatureFlag, +} from '@superset-ui/core'; import { styled, useTheme, css } from '@apache-superset/core/theme'; import { FunctionComponent, useState, useMemo, useCallback, Key } from 'react'; import { Link, useHistory } from 'react-router-dom'; @@ -50,6 +55,7 @@ import { ListViewFilterOperator as FilterOperator, type ListViewProps, type ListViewFilters, + type ListViewFetchDataConfig, } from 'src/components'; import { Typography } from '@superset-ui/core/components/Typography'; import handleResourceExport from 'src/utils/export'; @@ -67,6 +73,8 @@ import { CONFIRM_OVERWRITE_MESSAGE, } from 'src/features/datasets/constants'; import DuplicateDatasetModal from 'src/features/datasets/DuplicateDatasetModal'; +import type DatasetType from 'src/types/Dataset'; +import SemanticViewEditModal from 'src/features/semanticViews/SemanticViewEditModal'; import { useSelector } from 'react-redux'; import { QueryObjectColumns } from 'src/views/CRUD/types'; import { WIDER_DROPDOWN_WIDTH } from 'src/components/ListView/utils'; @@ -120,13 +128,16 @@ type Dataset = { database: { id: string; database_name: string; - }; + } | null; kind: string; + source_type?: 'database' | 'semantic_layer'; explore_url: string; id: number; owners: Array; schema: string; table_name: string; + description?: string | null; + cache_timeout?: number | null; }; interface VirtualDataset extends Dataset { @@ -152,18 +163,90 @@ const DatasetList: FunctionComponent = ({ const history = useHistory(); const theme = useTheme(); const { - state: { - loading, - resourceCount: datasetCount, - resourceCollection: datasets, - bulkSelectEnabled, - }, + state: { bulkSelectEnabled }, hasPerm, - fetchData, toggleBulkSelect, - refreshData, } = useListViewResource('dataset', t('dataset'), addDangerToast); + // Combined 
endpoint state + const [datasets, setDatasets] = useState([]); + const [datasetCount, setDatasetCount] = useState(0); + const [loading, setLoading] = useState(true); + const [lastFetchConfig, setLastFetchConfig] = + useState(null); + const [currentSourceFilter, setCurrentSourceFilter] = useState(''); + + const fetchData = useCallback((config: ListViewFetchDataConfig) => { + setLastFetchConfig(config); + setLoading(true); + const { pageIndex, pageSize, sortBy, filters: filterValues } = config; + + // Separate source_type filter from other filters + const sourceTypeFilter = filterValues.find(f => f.id === 'source_type'); + + // Track source filter for conditional Type filter visibility + const sourceVal = + sourceTypeFilter?.value && typeof sourceTypeFilter.value === 'object' + ? (sourceTypeFilter.value as { value: string }).value + : ((sourceTypeFilter?.value as string) ?? ''); + setCurrentSourceFilter(sourceVal); + const otherFilters = filterValues + .filter(f => f.id !== 'source_type') + .filter( + ({ value }) => value !== '' && value !== null && value !== undefined, + ) + .map(({ id, operator: opr, value }) => ({ + col: id, + opr, + value: + value && typeof value === 'object' && 'value' in value + ? value.value + : value, + })); + + // Add source_type filter for the combined endpoint + const sourceTypeValue = + sourceTypeFilter?.value && typeof sourceTypeFilter.value === 'object' + ? (sourceTypeFilter.value as { value: string }).value + : (sourceTypeFilter?.value as string | undefined); + if (sourceTypeValue) { + otherFilters.push({ + col: 'source_type', + opr: 'eq', + value: sourceTypeValue, + }); + } + + const queryParams = rison.encode_uri({ + order_column: sortBy[0].id, + order_direction: sortBy[0].desc ? 'desc' : 'asc', + page: pageIndex, + page_size: pageSize, + ...(otherFilters.length ? 
{ filters: otherFilters } : {}), + }); + + return SupersetClient.get({ + endpoint: `/api/v1/datasource/?q=${queryParams}`, + }) + .then(({ json = {} }) => { + setDatasets(json.result); + setDatasetCount(json.count); + }) + .catch(() => { + addDangerToast(t('An error occurred while fetching datasets')); + }) + .finally(() => { + setLoading(false); + }); + }, []); + + const refreshData = useCallback(() => { + if (lastFetchConfig) { + return fetchData(lastFetchConfig); + } + return undefined; + }, [lastFetchConfig, fetchData]); + const [datasetCurrentlyDeleting, setDatasetCurrentlyDeleting] = useState< | (Dataset & { charts: any; @@ -178,6 +261,10 @@ const DatasetList: FunctionComponent = ({ const [datasetCurrentlyDuplicating, setDatasetCurrentlyDuplicating] = useState(null); + const [svCurrentlyEditing, setSvCurrentlyEditing] = useState( + null, + ); + const [importingDataset, showImportModal] = useState(false); const [passwordFields, setPasswordFields] = useState([]); const [preparingExport, setPreparingExport] = useState(false); @@ -372,12 +459,22 @@ const DatasetList: FunctionComponent = ({ id: 'kind', }, { + Cell: ({ + row: { + original: { database }, + }, + }: any) => database?.database_name || '-', Header: t('Database'), accessor: 'database.database_name', size: 'xl', id: 'database.database_name', }, { + Cell: ({ + row: { + original: { schema }, + }, + }: any) => schema || '-', Header: t('Schema'), accessor: 'schema', size: 'lg', @@ -420,9 +517,40 @@ const DatasetList: FunctionComponent = ({ disableSortBy: true, id: 'sql', }, + { + accessor: 'source_type', + hidden: true, + disableSortBy: true, + id: 'source_type', + }, { Cell: ({ row: { original } }: any) => { - // Verify owner or isAdmin + const isSemanticView = original.source_type === 'semantic_layer'; + + // Semantic view: only show edit button + if (isSemanticView) { + if (!canEdit) return null; + return ( + + + setSvCurrentlyEditing(original)} + > + + + + + ); + } + + // Dataset: full set of actions 
const allowEdit = original.owners.map((o: Owner) => o.id).includes(user.userId) || isUserAdmin(user); @@ -536,6 +664,22 @@ const DatasetList: FunctionComponent = ({ const filterTypes: ListViewFilters = useMemo( () => [ + ...(isFeatureEnabled(FeatureFlag.SemanticLayers) + ? [ + { + Header: t('Source'), + key: 'source_type', + id: 'source_type', + input: 'select' as const, + operator: FilterOperator.Equals, + unfilteredLabel: t('All'), + selects: [ + { label: t('Database'), value: 'database' }, + { label: t('Semantic Layer'), value: 'semantic_layer' }, + ], + }, + ] + : []), { Header: t('Name'), key: 'search', @@ -543,18 +687,42 @@ const DatasetList: FunctionComponent = ({ input: 'search', operator: FilterOperator.Contains, }, - { - Header: t('Type'), - key: 'sql', - id: 'sql', - input: 'select', - operator: FilterOperator.DatasetIsNullOrEmpty, - unfilteredLabel: 'All', - selects: [ - { label: t('Virtual'), value: false }, - { label: t('Physical'), value: true }, - ], - }, + ...(isFeatureEnabled(FeatureFlag.SemanticLayers) + ? [ + { + Header: t('Type'), + key: 'sql', + id: 'sql', + input: 'select' as const, + operator: FilterOperator.DatasetIsNullOrEmpty, + unfilteredLabel: 'All', + selects: [ + ...(currentSourceFilter !== 'semantic_layer' + ? [ + { label: t('Physical'), value: true }, + { label: t('Virtual'), value: false }, + ] + : []), + ...(currentSourceFilter !== 'database' + ? 
[{ label: t('Semantic View'), value: 'semantic_view' }] + : []), + ], + }, + ] + : [ + { + Header: t('Type'), + key: 'sql', + id: 'sql', + input: 'select' as const, + operator: FilterOperator.DatasetIsNullOrEmpty, + unfilteredLabel: 'All', + selects: [ + { label: t('Physical'), value: true }, + { label: t('Virtual'), value: false }, + ], + }, + ]), { Header: t('Database'), key: 'database', @@ -645,7 +813,7 @@ const DatasetList: FunctionComponent = ({ dropdownStyle: { minWidth: WIDER_DROPDOWN_WIDTH }, }, ], - [user], + [user, currentSourceFilter], ); const menuData: SubMenuProps = { @@ -893,10 +1061,18 @@ const DatasetList: FunctionComponent = ({ /> )} + setSvCurrentlyEditing(null)} + onSave={refreshData} + addDangerToast={addDangerToast} + addSuccessToast={addSuccessToast} + semanticView={svCurrentlyEditing} + /> FlaskResponse: + """List datasets and semantic views combined. + --- + get: + summary: List datasets and semantic views combined + parameters: + - in: query + name: q + content: + application/json: + schema: + $ref: '#/components/schemas/get_list_schema' + responses: + 200: + description: Combined list of datasets and semantic views + 401: + $ref: '#/components/responses/401' + 403: + $ref: '#/components/responses/403' + 500: + $ref: '#/components/responses/500' + """ + if not security_manager.can_access("can_read", "Dataset"): + return self.response(403, message="Access denied") + + args = kwargs.get("rison", {}) + page = args.get("page", 0) + page_size = args.get("page_size", 25) + order_column = args.get("order_column", "changed_on") + order_direction = args.get("order_direction", "desc") + filters = args.get("filters", []) + + source_type, name_filter, sql_filter, type_filter = ( + self._parse_combined_list_filters(filters) + ) + + # If semantic layers feature flag is off, only show datasets + if not is_feature_enabled("SEMANTIC_LAYERS"): + source_type = "database" + + ds_q = self._build_dataset_query(name_filter, sql_filter) + + # Selecting 
Physical/Virtual implicitly means "database only" + if sql_filter is not None and source_type == "all": + source_type = "database" + + # Handle type_filter = "semantic_view" + if type_filter == "semantic_view": + source_type = "semantic_layer" + + sv_q = self._build_semantic_view_query(name_filter) + + # Build combined query based on source_type + if source_type == "database": + combined = ds_q.subquery() + elif source_type == "semantic_layer": + combined = sv_q.subquery() + else: + combined = union_all(ds_q, sv_q).subquery() + + total_count, rows = self._paginate_combined_query( + combined, order_column, order_direction, page, page_size + ) + + result = self._fetch_and_serialize_rows(rows) + + return self.response(200, count=total_count, result=result) + + @staticmethod + def _parse_combined_list_filters( + filters: list[dict[str, Any]], + ) -> tuple[str, str | None, bool | None, str | None]: + """Parse filters into source_type, name_filter, sql_filter, type_filter.""" + source_type = "all" + name_filter = None + sql_filter: bool | None = None + type_filter: str | None = None + for f in filters: + if f.get("col") == "source_type": + source_type = f.get("value", "all") + elif f.get("col") == "table_name" and f.get("opr") == "ct": + name_filter = f.get("value") + elif f.get("col") == "sql": + val = f.get("value") + if val == "semantic_view": + type_filter = "semantic_view" + else: + sql_filter = val + return source_type, name_filter, sql_filter, type_filter + + @staticmethod + def _build_dataset_query( + name_filter: str | None, + sql_filter: bool | None, + ) -> Select: + """Build the dataset subquery with filters.""" + ds_q = select( + SqlaTable.id.label("item_id"), # type: ignore + literal("database").label("source_type"), + SqlaTable.changed_on, + SqlaTable.table_name, + ).select_from(SqlaTable.__table__) + + if not security_manager.can_access_all_datasources(): + ds_q = ds_q.join( + sqla_models.Database, + sqla_models.Database.id == SqlaTable.database_id, + ) + 
ds_q = ds_q.where(get_dataset_access_filters(SqlaTable)) + + if name_filter: + ds_q = ds_q.where(SqlaTable.table_name.ilike(f"%{name_filter}%")) + + if sql_filter is not None: + if sql_filter: + ds_q = ds_q.where(or_(SqlaTable.sql.is_(None), SqlaTable.sql == "")) + else: + ds_q = ds_q.where(and_(SqlaTable.sql.isnot(None), SqlaTable.sql != "")) + + return ds_q + + @staticmethod + def _build_semantic_view_query(name_filter: str | None) -> Select: + """Build the semantic view subquery with filters.""" + sv_q = select( + SemanticView.id.label("item_id"), # type: ignore + literal("semantic_layer").label("source_type"), + SemanticView.changed_on, + SemanticView.name.label("table_name"), + ).select_from(SemanticView.__table__) + + if name_filter: + sv_q = sv_q.where(SemanticView.name.ilike(f"%{name_filter}%")) + + return sv_q + + @staticmethod + def _paginate_combined_query( + combined: Any, + order_column: str, + order_direction: str, + page: int, + page_size: int, + ) -> tuple[int, list[Any]]: + """Count, sort, and paginate the combined query.""" + sort_col_map = { + "changed_on_delta_humanized": "changed_on", + "table_name": "table_name", + } + sort_col_name = sort_col_map.get(order_column, "changed_on") + + total_count = ( + db.session.execute(select(func.count()).select_from(combined)).scalar() or 0 + ) + + sort_col = combined.c[sort_col_name] + if order_direction == "desc": + sort_col = sort_col.desc() + else: + sort_col = sort_col.asc() + + paginated_q = ( + select(combined.c.item_id, combined.c.source_type) + .order_by(sort_col) + .offset(page * page_size) + .limit(page_size) + ) + rows = db.session.execute(paginated_q).fetchall() + return total_count, rows + + def _fetch_and_serialize_rows(self, rows: list[Any]) -> list[dict[str, Any]]: + """Fetch ORM objects and serialize rows in order.""" + dataset_ids = [r.item_id for r in rows if r.source_type == "database"] + sv_ids = [r.item_id for r in rows if r.source_type == "semantic_layer"] + + datasets_map: dict[int, 
SqlaTable] = {} + if dataset_ids: + ds_objs = ( + db.session.query(SqlaTable).filter(SqlaTable.id.in_(dataset_ids)).all() # type: ignore + ) + datasets_map = {obj.id: obj for obj in ds_objs} + + sv_map: dict[int, SemanticView] = {} + if sv_ids: + sv_objs = ( + db.session.query(SemanticView).filter(SemanticView.id.in_(sv_ids)).all() # type: ignore + ) + sv_map = {obj.id: obj for obj in sv_objs} + + result = [] + for row in rows: + if row.source_type == "database": + obj = datasets_map.get(row.item_id) + if obj: + result.append(self._serialize_dataset(obj)) + else: + obj = sv_map.get(row.item_id) + if obj: + result.append(self._serialize_semantic_view(obj)) + + return result + + @staticmethod + def _serialize_dataset(obj: SqlaTable) -> dict[str, Any]: + changed_by = obj.changed_by + return { + "id": obj.id, + "uuid": str(obj.uuid), + "table_name": obj.table_name, + "kind": obj.kind, + "source_type": "database", + "description": obj.description, + "explore_url": obj.explore_url, + "database": { + "id": obj.database_id, + "database_name": obj.database.database_name, + } + if obj.database + else None, + "schema": obj.schema, + "sql": obj.sql, + "extra": obj.extra, + "owners": [ + { + "id": o.id, + "first_name": o.first_name, + "last_name": o.last_name, + } + for o in obj.owners + ], + "changed_by_name": obj.changed_by_name, + "changed_by": { + "first_name": changed_by.first_name, + "last_name": changed_by.last_name, + } + if changed_by + else None, + "changed_on_delta_humanized": obj.changed_on_delta_humanized(), + "changed_on_utc": obj.changed_on_utc(), + } + + @staticmethod + def _serialize_semantic_view(obj: SemanticView) -> dict[str, Any]: + changed_by = obj.changed_by + return { + "id": obj.id, + "uuid": str(obj.uuid), + "table_name": obj.name, + "kind": "semantic_view", + "source_type": "semantic_layer", + "description": obj.description, + "cache_timeout": obj.cache_timeout, + "explore_url": obj.explore_url, + "database": None, + "schema": None, + "sql": None, + 
"extra": None, + "owners": [], + "changed_by_name": obj.changed_by_name, + "changed_by": { + "first_name": changed_by.first_name, + "last_name": changed_by.last_name, + } + if changed_by + else None, + "changed_on_delta_humanized": obj.changed_on_delta_humanized(), + "changed_on_utc": obj.changed_on_utc(), + } diff --git a/superset/semantic_layers/models.py b/superset/semantic_layers/models.py index 1db2708763b..77fe8e9ad15 100644 --- a/superset/semantic_layers/models.py +++ b/superset/semantic_layers/models.py @@ -206,6 +206,14 @@ class SemanticView(AuditMixinNullable, Model): def get_query_str(self, query_obj: QueryObjectDict) -> str: return "Not implemented for semantic layers" + @property + def table_name(self) -> str: + return self.name + + @property + def kind(self) -> str: + return "semantic_view" + @property def uid(self) -> str: return self.implementation.uid() diff --git a/tests/unit_tests/semantic_layers/models_test.py b/tests/unit_tests/semantic_layers/models_test.py index ebc5f4f861d..3da941da355 100644 --- a/tests/unit_tests/semantic_layers/models_test.py +++ b/tests/unit_tests/semantic_layers/models_test.py @@ -357,6 +357,19 @@ def test_semantic_view_type() -> None: assert view.type == "semantic_view" +def test_semantic_view_table_name() -> None: + """Test SemanticView table_name property.""" + view = SemanticView() + view.name = "Orders View" + assert view.table_name == "Orders View" + + +def test_semantic_view_kind() -> None: + """Test SemanticView kind property.""" + view = SemanticView() + assert view.kind == "semantic_view" + + def test_semantic_view_offset() -> None: """Test SemanticView offset property.""" view = SemanticView()