diff --git a/superset-frontend/src/features/alerts/AlertReportModal.test.tsx b/superset-frontend/src/features/alerts/AlertReportModal.test.tsx
index b025a27c018..76a6527d1ad 100644
--- a/superset-frontend/src/features/alerts/AlertReportModal.test.tsx
+++ b/superset-frontend/src/features/alerts/AlertReportModal.test.tsx
@@ -25,6 +25,7 @@ import {
waitFor,
within,
createStore,
+ fireEvent,
} from 'spec/helpers/testing-library';
import reducerIndex from 'spec/helpers/reducerIndex';
import { buildErrorTooltipMessage } from './buildErrorTooltipMessage';
@@ -42,6 +43,14 @@ jest.mock('src/features/databases/state.ts', () => ({
}),
}));
+const mockGetChartDataRequest = jest.fn().mockResolvedValue({
+ json: { result: [{ data: [] }] },
+});
+jest.mock('src/components/Chart/chartAction', () => ({
+ ...jest.requireActual('src/components/Chart/chartAction'),
+ getChartDataRequest: (...args: unknown[]) => mockGetChartDataRequest(...args),
+}));
+
const generateMockPayload = (dashboard = true) => {
const mockPayload = {
active: false,
@@ -128,6 +137,7 @@ fetchMock.get(FETCH_REPORT_WITH_FILTERS_ENDPOINT, {
type: 'Report',
extra: {
dashboard: {
+ anchor: 'TAB_1',
nativeFilters: [
{
nativeFilterId: 'NATIVE_FILTER-abc123',
@@ -184,17 +194,40 @@ fetchMock.get(FETCH_REPORT_OVERWRITE_ENDPOINT, {
},
});
-// Related mocks
+const FETCH_REPORT_INVALID_ANCHOR_ENDPOINT = 'glob:*/api/v1/report/7';
+fetchMock.get(FETCH_REPORT_INVALID_ANCHOR_ENDPOINT, {
+ result: {
+ ...generateMockPayload(true),
+ id: 7,
+ type: 'Report',
+ extra: {
+ dashboard: {
+ anchor: 'TAB_999',
+ nativeFilters: [],
+ },
+ },
+ },
+});
+
+// Related mocks — component uses /api/v1/report/related/* endpoints for both
+// alerts and reports, so we mock both the legacy alert paths and the actual
+// report paths used by the component.
const ownersEndpoint = 'glob:*/api/v1/alert/related/owners?*';
const databaseEndpoint = 'glob:*/api/v1/alert/related/database?*';
const dashboardEndpoint = 'glob:*/api/v1/alert/related/dashboard?*';
const chartEndpoint = 'glob:*/api/v1/alert/related/chart?*';
+const reportDashboardEndpoint = 'glob:*/api/v1/report/related/dashboard?*';
+const reportChartEndpoint = 'glob:*/api/v1/report/related/chart?*';
const tabsEndpoint = 'glob:*/api/v1/dashboard/1/tabs';
fetchMock.get(ownersEndpoint, { result: [] });
fetchMock.get(databaseEndpoint, { result: [] });
fetchMock.get(dashboardEndpoint, { result: [] });
fetchMock.get(chartEndpoint, { result: [{ text: 'table chart', value: 1 }] });
+fetchMock.get(reportDashboardEndpoint, { result: [] });
+fetchMock.get(reportChartEndpoint, {
+ result: [{ text: 'table chart', value: 1 }],
+});
fetchMock.get(
tabsEndpoint,
{
@@ -206,6 +239,11 @@ fetchMock.get(
{ name: tabsEndpoint },
);
+// Chart detail endpoint — called by getChartVisualizationType when a chart is selected
+fetchMock.get('glob:*/api/v1/chart/*', {
+ result: { viz_type: 'table' },
+});
+
// Restore the default tabs route and remove any test-specific overrides.
// Called in afterEach so cleanup runs even when a test fails mid-way.
const restoreDefaultTabsRoute = () => {
@@ -236,6 +274,33 @@ const restoreDefaultTabsRoute = () => {
afterEach(() => {
restoreDefaultTabsRoute();
+
+ // Clear call history so stale counts don't leak between tests
+ fetchMock.clearHistory();
+
+ // Remove test-specific named routes (try/catch — may not exist)
+ for (const name of [
+ 'put-condition',
+ 'put-edit',
+ 'put-extra-dashboard',
+ 'create-post',
+ 'put-dashboard-payload',
+ 'put-report-1',
+ 'put-no-recipients',
+ 'tabs-99',
+ ]) {
+ try {
+ fetchMock.removeRoute(name);
+ } catch {
+ // route may not exist
+ }
+ }
+
+ // Reset chartData mock so stale resolved values don't leak
+ mockGetChartDataRequest.mockReset();
+ mockGetChartDataRequest.mockResolvedValue({
+ json: { result: [{ data: [] }] },
+ });
});
// Create a valid alert with all required fields entered for validation check
@@ -817,6 +882,771 @@ test('renders dashboard filter dropdowns', async () => {
expect(filterOptionDropdown).toBeInTheDocument();
});
+// --------------- PHASE 2: Dashboard/Tab/Filter Interaction Tests ------------------
+
+const tabsWithFilters = {
+ result: {
+ all_tabs: { TAB_1: 'Tab 1', TAB_2: 'Tab 2' },
+ tab_tree: [
+ { title: 'Tab 1', value: 'TAB_1' },
+ { title: 'Tab 2', value: 'TAB_2' },
+ ],
+ native_filters: {
+ all: [
+ {
+ id: 'NATIVE_FILTER-F1',
+ name: 'Country Filter',
+ filterType: 'filter_select',
+ targets: [{ column: { name: 'country' }, datasetId: 1 }],
+ adhoc_filters: [],
+ },
+ {
+ id: 'NATIVE_FILTER-F2',
+ name: 'City Filter',
+ filterType: 'filter_select',
+ targets: [{ column: { name: 'city' }, datasetId: 2 }],
+ adhoc_filters: [],
+ },
+ ],
+ TAB_1: [
+ {
+ id: 'NATIVE_FILTER-F1',
+ name: 'Country Filter',
+ filterType: 'filter_select',
+ targets: [{ column: { name: 'country' }, datasetId: 1 }],
+ adhoc_filters: [],
+ },
+ ],
+ TAB_2: [
+ {
+ id: 'NATIVE_FILTER-F2',
+ name: 'City Filter',
+ filterType: 'filter_select',
+ targets: [{ column: { name: 'city' }, datasetId: 2 }],
+ adhoc_filters: [],
+ },
+ ],
+ },
+ },
+};
+
+const noTabsResponse = {
+ result: {
+ all_tabs: {},
+ tab_tree: [],
+ native_filters: {},
+ },
+};
+
+test('dashboard with no tabs disables tab selector', async () => {
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(tabsEndpoint, noTabsResponse, { name: tabsEndpoint });
+
+ render(<AlertReportModal {...generateMockedProps(true, true)} />, {
+ useRedux: true,
+ });
+
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByText(/test dashboard/i);
+
+ const tabSelector = document.querySelector('.ant-select-disabled');
+ expect(tabSelector).toBeInTheDocument();
+});
+
+test('dashboard with no tabs and no filters hides filter add link', async () => {
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(tabsEndpoint, noTabsResponse, { name: tabsEndpoint });
+
+ render(<AlertReportModal {...generateMockedProps(true, true)} />, {
+ useRedux: true,
+ });
+
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByText(/test dashboard/i);
+
+ // Wait for tabs fetch to complete
+ await waitFor(() => {
+ expect(fetchMock.callHistory.calls(tabsEndpoint).length).toBeGreaterThan(0);
+ });
+
+ // Tab selector should be disabled (no tabs)
+ const disabledSelects = document.querySelectorAll('.ant-select-disabled');
+ expect(disabledSelects.length).toBeGreaterThanOrEqual(1);
+
+ // Filter Select should also be disabled (no filter options available)
+ expect(disabledSelects.length).toBeGreaterThanOrEqual(2);
+
+ // "Apply another dashboard filter" link should NOT appear
+ // because noTabsResponse has empty native_filters ({})
+ expect(
+ screen.queryByText(/apply another dashboard filter/i),
+ ).not.toBeInTheDocument();
+});
+
+test('dashboard switching resets tab and filter selections', async () => {
+ // Return dashboard options so user can switch
+ const dashboardOptions = {
+ result: [
+ { text: 'Test Dashboard', value: 1 },
+ { text: 'Other Dashboard', value: 99 },
+ ],
+ count: 2,
+ };
+ fetchMock.removeRoute(dashboardEndpoint);
+ fetchMock.get(dashboardEndpoint, dashboardOptions);
+ fetchMock.removeRoute(reportDashboardEndpoint);
+ fetchMock.get(reportDashboardEndpoint, dashboardOptions);
+
+ // Dashboard 1 has tabs and filters
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(tabsEndpoint, tabsWithFilters, { name: tabsEndpoint });
+
+ // Dashboard 99 has no tabs
+ const tabs99 = 'glob:*/api/v1/dashboard/99/tabs';
+ fetchMock.get(tabs99, noTabsResponse, { name: tabs99 });
+
+ render(<AlertReportModal {...generateMockedProps(true, true)} />, {
+ useRedux: true,
+ });
+
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByText(/test dashboard/i);
+
+ // Wait for tabs endpoint call to complete (proves filter data is loaded)
+ await waitFor(
+ () => {
+ expect(fetchMock.callHistory.calls(tabsEndpoint).length).toBeGreaterThan(
+ 0,
+ );
+ },
+ { timeout: 5000 },
+ );
+
+ // Wait for tabs to load from dashboard 1
+ await waitFor(() => {
+ expect(screen.getAllByText(/select tab/i)).toHaveLength(1);
+ });
+
+ // Verify filters are available — this proves dashboard 1 has filter options
+ const filterCombobox = await waitFor(() =>
+ screen.getByRole('combobox', { name: /select filter/i }),
+ );
+
+ // Confirm the filter dropdown has options by opening it
+ fireEvent.mouseDown(filterCombobox);
+ await screen.findByText('Country Filter', {}, { timeout: 5000 });
+ // Close the dropdown by pressing Escape
+ fireEvent.keyDown(filterCombobox, { key: 'Escape' });
+
+ // Switch to "Other Dashboard"
+ const dashboardSelect = screen.getByRole('combobox', {
+ name: /dashboard/i,
+ });
+ userEvent.clear(dashboardSelect);
+ userEvent.type(dashboardSelect, 'Other Dashboard{enter}');
+
+ // Tab selector should reset: "Other Dashboard" has no tabs, so disabled with placeholder
+ await waitFor(
+ () => {
+ expect(screen.getByText(/select a tab/i)).toBeInTheDocument();
+ },
+ { timeout: 10000 },
+ );
+
+ // Filter row should reset to empty (no filter selected)
+ await waitFor(() => {
+ const filterSelects = screen.getAllByRole('combobox', {
+ name: /select filter/i,
+ });
+ // Filter select should have no selected value (placeholder state)
+ filterSelects.forEach(select => {
+ const container = select.closest('.ant-select');
+ expect(
+ container?.querySelector('.ant-select-selection-item'),
+ ).not.toBeInTheDocument();
+ });
+ });
+
+ // Restore dashboard endpoints
+ fetchMock.removeRoute(dashboardEndpoint);
+ fetchMock.get(dashboardEndpoint, { result: [] });
+ fetchMock.removeRoute(reportDashboardEndpoint);
+ fetchMock.get(reportDashboardEndpoint, { result: [] });
+ fetchMock.removeRoute(tabs99);
+}, 45000);
+
+test('different dashboard populates its own tabs and filters', async () => {
+ // Set up a report (id:99) that uses dashboard 99 instead of dashboard 1.
+ // This tests that the component correctly loads tabs and filters for a
+ // different dashboard (the "dashboard B has its own data" case).
+ const FETCH_REPORT_DASH99_ENDPOINT = 'glob:*/api/v1/report/99';
+ fetchMock.get(FETCH_REPORT_DASH99_ENDPOINT, {
+ result: {
+ ...generateMockPayload(true),
+ id: 99,
+ type: 'Report',
+ dashboard: { id: 99, dashboard_title: 'Other Dashboard' },
+ },
+ });
+
+ // Dashboard 99 has its own tabs (Tab Alpha, Tab Beta) and a Region Filter
+ const tabs99Endpoint = 'glob:*/api/v1/dashboard/99/tabs';
+ const dash99Tabs = {
+ result: {
+ all_tabs: { TAB_A: 'Tab Alpha', TAB_B: 'Tab Beta' },
+ tab_tree: [
+ { title: 'Tab Alpha', value: 'TAB_A' },
+ { title: 'Tab Beta', value: 'TAB_B' },
+ ],
+ native_filters: {
+ all: [
+ {
+ id: 'NATIVE_FILTER-R1',
+ name: 'Region Filter',
+ filterType: 'filter_select',
+ targets: [{ column: { name: 'region' }, datasetId: 3 }],
+ adhoc_filters: [],
+ },
+ ],
+ TAB_A: [
+ {
+ id: 'NATIVE_FILTER-R1',
+ name: 'Region Filter',
+ filterType: 'filter_select',
+ targets: [{ column: { name: 'region' }, datasetId: 3 }],
+ adhoc_filters: [],
+ },
+ ],
+ TAB_B: [],
+ },
+ },
+ };
+ fetchMock.get(tabs99Endpoint, dash99Tabs, { name: 'tabs-99' });
+
+ const props = generateMockedProps(true, true);
+ const dash99Props = { ...props, alert: { ...validAlert, id: 99 } };
+
+ render(<AlertReportModal {...dash99Props} />, { useRedux: true });
+
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByText(/other dashboard/i);
+
+ // Wait for dashboard 99 tabs to load — increase timeout because the
+ // component must first fetch /api/v1/report/99, then extract dashboard_id,
+ // then fetch /api/v1/dashboard/99/tabs. This three-step async chain needs
+ // more time in CI.
+ await waitFor(
+ () => {
+ expect(fetchMock.callHistory.calls('tabs-99').length).toBeGreaterThan(0);
+ },
+ { timeout: 10000 },
+ );
+
+ // Tab selector should be enabled (dashboard 99 has tabs)
+ await waitFor(() => {
+ const treeSelect = document.querySelector('.ant-tree-select');
+ expect(treeSelect).toBeInTheDocument();
+ expect(treeSelect).not.toHaveClass('ant-select-disabled');
+ });
+
+ // Filter dropdown should show dashboard 99's "Region Filter"
+ const filterSelect = await waitFor(() =>
+ screen.getByRole('combobox', { name: /select filter/i }),
+ );
+ fireEvent.mouseDown(filterSelect);
+
+ await screen.findByText('Region Filter', {}, { timeout: 5000 });
+
+ // Dashboard 1's filters (Country/City) should NOT appear
+ expect(screen.queryByText('Country Filter')).not.toBeInTheDocument();
+
+ // Cleanup
+ fetchMock.removeRoute(FETCH_REPORT_DASH99_ENDPOINT);
+ fetchMock.removeRoute('tabs-99');
+}, 45000);
+
+test('dashboard tabs fetch failure shows error toast', async () => {
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(tabsEndpoint, 500, { name: tabsEndpoint });
+
+ const store = createStore({}, reducerIndex);
+
+ render(<AlertReportModal {...generateMockedProps(true, true)} />, {
+ useRedux: true,
+ store,
+ });
+
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByText(/test dashboard/i);
+
+ // Tab selector should remain disabled (no tabs loaded)
+ const tabSelector = document.querySelector('.ant-select-disabled');
+ expect(tabSelector).toBeInTheDocument();
+
+ // Verify the tabs request was attempted
+ const tabsCalls = fetchMock.callHistory.calls(tabsEndpoint);
+ expect(tabsCalls.length).toBeGreaterThan(0);
+
+ // Verify danger toast was dispatched for the fetch failure
+ await waitFor(() => {
+ const toasts = (store.getState() as any).messageToasts;
+ expect(toasts.length).toBeGreaterThan(0);
+ expect(
+ toasts.some((t: { text: string }) =>
+ t.text.includes('error retrieving dashboard tabs'),
+ ),
+ ).toBe(true);
+ });
+});
+
+test('switching content type to chart hides tab and filter sections', async () => {
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(tabsEndpoint, tabsWithFilters, { name: tabsEndpoint });
+
+ render(<AlertReportModal {...generateMockedProps(true, true)} />, {
+ useRedux: true,
+ });
+
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByText(/test dashboard/i);
+
+ // Tab selector and filter dropdowns should be visible for dashboard
+ expect(screen.getAllByText(/select tab/i)).toHaveLength(1);
+ expect(
+ screen.getByRole('combobox', { name: /select filter/i }),
+ ).toBeInTheDocument();
+
+ // Switch to chart
+ const contentTypeSelector = screen.getByRole('combobox', {
+ name: /select content type/i,
+ });
+ await comboboxSelect(contentTypeSelector, 'Chart', () =>
+ screen.getByRole('combobox', { name: /chart/i }),
+ );
+
+ // Tab and filter sections should be hidden
+ expect(screen.queryByText(/select tab/i)).not.toBeInTheDocument();
+ expect(
+ screen.queryByRole('combobox', { name: /select filter/i }),
+ ).not.toBeInTheDocument();
+});
+
+test('adding and removing dashboard filter rows', async () => {
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(tabsEndpoint, tabsWithFilters, { name: tabsEndpoint });
+
+ render(<AlertReportModal {...generateMockedProps(true, true)} />, {
+ useRedux: true,
+ });
+
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByText(/test dashboard/i);
+
+ // Wait for filter options to load
+ await waitFor(() => {
+ expect(
+ screen.getByRole('combobox', { name: /select filter/i }),
+ ).toBeInTheDocument();
+ });
+
+ // Should start with 1 filter row
+ const initialFilterSelects = screen.getAllByRole('combobox', {
+ name: /select filter/i,
+ });
+ expect(initialFilterSelects).toHaveLength(1);
+
+ // Click "Apply another dashboard filter"
+ const addFilterButton = screen.getByText(/apply another dashboard filter/i);
+ userEvent.click(addFilterButton);
+
+ // Should now have 2 filter rows
+ await waitFor(() => {
+ expect(
+ screen.getAllByRole('combobox', { name: /select filter/i }),
+ ).toHaveLength(2);
+ });
+
+ // Remove the second filter row by clicking its delete icon
+ const deleteIcons = document.querySelectorAll('.filters-trashcan');
+ expect(deleteIcons.length).toBeGreaterThanOrEqual(2);
+ fireEvent.click(deleteIcons[deleteIcons.length - 1]);
+
+ // Should be back to 1 filter row
+ await waitFor(() => {
+ expect(
+ screen.getAllByRole('combobox', { name: /select filter/i }),
+ ).toHaveLength(1);
+ });
+});
+
+test('alert shows condition section, report does not', () => {
+ // Alert has 5 sections
+ const { unmount } = render(
+ <AlertReportModal {...generateMockedProps(false, true)} />,
+ { useRedux: true },
+ );
+ expect(screen.getAllByRole('tab')).toHaveLength(5);
+ expect(screen.getByTestId('alert-condition-panel')).toBeInTheDocument();
+ unmount();
+
+ // Report has 4 sections, no condition panel
+ render(<AlertReportModal {...generateMockedProps(true, true)} />, {
+ useRedux: true,
+ });
+ expect(screen.getAllByRole('tab')).toHaveLength(4);
+ expect(screen.queryByTestId('alert-condition-panel')).not.toBeInTheDocument();
+});
+
+test('submit includes conditionNotNull without threshold in alert payload', async () => {
+ // Mock payload returns id:1, so updateResource PUTs to /api/v1/report/1
+ fetchMock.put(
+ 'glob:*/api/v1/report/1',
+ { id: 1, result: {} },
+ { name: 'put-condition' },
+ );
+
+ render(<AlertReportModal {...generateMockedProps(false, true, true)} />, {
+ useRedux: true,
+ });
+
+ // Wait for resource to load and all validation to pass
+ await waitFor(
+ () => {
+ expect(
+ screen.queryAllByRole('img', { name: /check-circle/i }),
+ ).toHaveLength(5);
+ },
+ { timeout: 10000 },
+ );
+
+ // Open condition panel and select "not null"
+ userEvent.click(screen.getByTestId('alert-condition-panel'));
+ await screen.findByText(/smaller than/i);
+ const condition = screen.getByRole('combobox', { name: /condition/i });
+ await comboboxSelect(
+ condition,
+ 'not null',
+ () => screen.getAllByText(/not null/i)[0],
+ );
+
+ // Wait for the threshold input to become disabled after "not null" selection —
+ // in CI the state update from comboboxSelect can lag behind the DOM assertion.
+ await waitFor(() => {
+ expect(screen.getByRole('spinbutton')).toBeDisabled();
+ });
+
+ // Wait for Save to be enabled and click
+ await waitFor(() => {
+ expect(screen.getByRole('button', { name: /save/i })).toBeEnabled();
+ });
+ userEvent.click(screen.getByRole('button', { name: /save/i }));
+
+ // Verify the PUT payload
+ await waitFor(() => {
+ const calls = fetchMock.callHistory.calls('put-condition');
+ expect(calls.length).toBeGreaterThan(0);
+ });
+
+ const calls = fetchMock.callHistory.calls('put-condition');
+ const body = JSON.parse(calls[calls.length - 1].options.body as string);
+ expect(body.validator_type).toBe('not null');
+ expect(body.validator_config_json).toEqual({});
+
+ fetchMock.removeRoute('put-condition');
+}, 45000);
+
+test('edit mode submit uses PUT and excludes read-only fields', async () => {
+ // Mock payload returns id:1, so updateResource PUTs to /api/v1/report/1
+ fetchMock.put(
+ 'glob:*/api/v1/report/1',
+ { id: 1, result: {} },
+ { name: 'put-edit' },
+ );
+
+ render(<AlertReportModal {...generateMockedProps(false, true, true)} />, {
+ useRedux: true,
+ });
+
+ // Wait for resource to load and all validation to pass
+ await waitFor(() => {
+ expect(
+ screen.queryAllByRole('img', { name: /check-circle/i }),
+ ).toHaveLength(5);
+ });
+
+ await waitFor(() => {
+ expect(screen.getByRole('button', { name: /save/i })).toBeEnabled();
+ });
+ userEvent.click(screen.getByRole('button', { name: /save/i }));
+
+ await waitFor(() => {
+ const calls = fetchMock.callHistory.calls('put-edit');
+ expect(calls.length).toBeGreaterThan(0);
+ });
+
+ const calls = fetchMock.callHistory.calls('put-edit');
+ const body = JSON.parse(calls[calls.length - 1].options.body as string);
+
+ // Edit mode strips these read-only fields
+ expect(body).not.toHaveProperty('id');
+ expect(body).not.toHaveProperty('created_by');
+ expect(body).not.toHaveProperty('last_eval_dttm');
+ expect(body).not.toHaveProperty('last_state');
+ expect(body).not.toHaveProperty('last_value');
+ expect(body).not.toHaveProperty('last_value_row_json');
+
+ // Core fields remain
+ expect(body.type).toBe('Alert');
+ expect(body.name).toBe('Test Alert');
+
+ // Recipients from the loaded resource should be in payload
+ expect(body.recipients).toBeDefined();
+ expect(body.recipients.length).toBeGreaterThan(0);
+ expect(body.recipients[0].type).toBe('Email');
+
+ fetchMock.removeRoute('put-edit');
+});
+
+test('edit mode preserves extra.dashboard tab/filter state in payload', async () => {
+ // Report 3 has extra.dashboard.nativeFilters + anchor:'TAB_1'
+ fetchMock.put(
+ 'glob:*/api/v1/report/3',
+ { id: 3, result: {} },
+ { name: 'put-extra-dashboard' },
+ );
+
+ // Provide tabs so TAB_1 is in allTabs and anchor is preserved
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(tabsEndpoint, tabsWithFilters, { name: tabsEndpoint });
+
+ const props = {
+ ...generateMockedProps(true, true, true),
+ alert: { ...validAlert, id: 3 },
+ };
+
+ render(<AlertReportModal {...props} />, {
+ useRedux: true,
+ });
+
+ // Wait for Save to be enabled (report type has fewer validation sections)
+ await waitFor(
+ () => {
+ expect(screen.getByRole('button', { name: /save/i })).toBeEnabled();
+ },
+ { timeout: 10000 },
+ );
+ userEvent.click(screen.getByRole('button', { name: /save/i }));
+
+ await waitFor(() => {
+ const calls = fetchMock.callHistory.calls('put-extra-dashboard');
+ expect(calls.length).toBeGreaterThan(0);
+ });
+
+ const calls = fetchMock.callHistory.calls('put-extra-dashboard');
+ const body = JSON.parse(calls[calls.length - 1].options.body as string);
+
+ // extra.dashboard structure must be preserved in the payload
+ expect(body.extra).toBeDefined();
+ expect(body.extra.dashboard).toBeDefined();
+ expect(body.extra.dashboard.nativeFilters).toEqual(
+ expect.arrayContaining([
+ expect.objectContaining({ nativeFilterId: 'NATIVE_FILTER-abc123' }),
+ ]),
+ );
+ expect(body.extra.dashboard.anchor).toBe('TAB_1');
+
+ fetchMock.removeRoute('put-extra-dashboard');
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(
+ tabsEndpoint,
+ { result: { all_tabs: {}, tab_tree: [] } },
+ { name: tabsEndpoint },
+ );
+});
+
+test('create mode submits POST and calls onAdd with response', async () => {
+ fetchMock.post(
+ 'glob:*/api/v1/report/',
+ { id: 100, result: {} },
+ { name: 'create-post' },
+ );
+
+ const props = generateMockedProps(true); // isReport, create mode (no alert)
+ const onAdd = jest.fn();
+ const createProps = { ...props, onAdd };
+
+ render(<AlertReportModal {...createProps} />, { useRedux: true });
+
+ expect(screen.getByText('Add report')).toBeInTheDocument();
+
+ // Fill name — fireEvent.change is instant vs userEvent.type firing 13
+ // individual keystroke events, keeping this test well under the 20s timeout.
+ const nameInput = screen.getByPlaceholderText(/enter report name/i);
+ fireEvent.change(nameInput, { target: { value: 'My New Report' } });
+
+ // Open contents panel — content type defaults to Dashboard
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByRole('combobox', { name: /select content type/i });
+
+ // Switch content type to Chart (default is Dashboard)
+ const contentTypeSelect = screen.getByRole('combobox', {
+ name: /select content type/i,
+ });
+ userEvent.click(contentTypeSelect);
+ const chartOption = await screen.findByText('Chart');
+ userEvent.click(chartOption);
+
+ // Select a chart from the chart combobox
+ const chartSelect = await screen.findByRole('combobox', {
+ name: /chart/i,
+ });
+ userEvent.type(chartSelect, 'table');
+ const tableChart = await screen.findByText('table chart');
+ userEvent.click(tableChart);
+
+ // Open notification panel and set recipient email
+ userEvent.click(screen.getByTestId('notification-method-panel'));
+ const recipientInput = await screen.findByTestId('recipients');
+ fireEvent.change(recipientInput, { target: { value: 'test@example.com' } });
+
+ // Wait for Add button to be enabled (use exact name to avoid matching
+ // "Add CC Recipients" and "Add BCC Recipients" buttons)
+ await waitFor(
+ () => {
+ expect(screen.getByRole('button', { name: 'Add' })).toBeEnabled();
+ },
+ { timeout: 5000 },
+ );
+
+ // Click Add
+ userEvent.click(screen.getByRole('button', { name: 'Add' }));
+
+ // Verify POST was called (not PUT)
+ await waitFor(() => {
+ const calls = fetchMock.callHistory.calls('create-post');
+ expect(calls.length).toBeGreaterThan(0);
+ });
+
+ const calls = fetchMock.callHistory.calls('create-post');
+ const body = JSON.parse(calls[0].options.body as string);
+
+ expect(body.type).toBe('Report');
+ expect(body.name).toBe('My New Report');
+ expect(body.chart).toBe(1);
+ // Chart content type means dashboard is null (mutually exclusive)
+ expect(body.dashboard).toBeNull();
+ expect(body.recipients).toBeDefined();
+ expect(body.recipients[0].type).toBe('Email');
+ expect(body.recipients[0].recipient_config_json.target).toBe(
+ 'test@example.com',
+ );
+
+ // Verify onAdd was called with the response id
+ await waitFor(() => {
+ expect(onAdd).toHaveBeenCalledWith(100);
+ });
+
+ fetchMock.removeRoute('create-post');
+}, 45000);
+
+test('create mode defaults to dashboard content type with chart null', async () => {
+ // Coverage strategy: the create-mode POST pathway is tested via the chart
+ // POST test above. The dashboard content payload (dashboard=value, chart=null)
+ // is tested via the edit-mode PUT test below. This test verifies that create
+ // mode reports default to Dashboard content type, completing the coverage:
+ // create POST method ← chart POST test
+ // dashboard payload shape ← edit-mode dashboard PUT test
+ // default content type = Dashboard ← THIS test
+
+ const props = generateMockedProps(true); // isReport, create mode
+ render(<AlertReportModal {...props} />, { useRedux: true });
+
+ // Open contents panel
+ userEvent.click(screen.getByTestId('contents-panel'));
+ const contentTypeSelect = await screen.findByRole('combobox', {
+ name: /select content type/i,
+ });
+
+ // Default content type should be "Dashboard" (not "Chart")
+ const selectedItem = contentTypeSelect
+ .closest('.ant-select')
+ ?.querySelector('.ant-select-selection-item');
+ expect(selectedItem).toBeInTheDocument();
+ expect(selectedItem?.textContent).toBe('Dashboard');
+
+ // Dashboard selector should be rendered (not chart selector)
+ expect(
+ screen.getByRole('combobox', { name: /dashboard/i }),
+ ).toBeInTheDocument();
+ expect(
+ screen.queryByRole('combobox', { name: /chart/i }),
+ ).not.toBeInTheDocument();
+});
+
+test('dashboard content type submits dashboard id and null chart', async () => {
+ // Use a custom alert prop with dashboard content from the start,
+ // matching the fetched resource shape (FETCH_DASHBOARD_ENDPOINT).
+ const dashboardAlert: AlertObject = {
+ ...validAlert,
+ id: 1,
+ dashboard_id: 1,
+ chart_id: 0,
+ dashboard: {
+ id: 1,
+ value: 1,
+ label: 'Test Dashboard',
+ dashboard_title: 'Test Dashboard',
+ } as any,
+ chart: undefined as any,
+ };
+
+ fetchMock.put(
+ 'glob:*/api/v1/report/1',
+ { id: 1, result: {} },
+ { name: 'put-dashboard-payload' },
+ );
+
+ const props = generateMockedProps(false, false);
+ const editProps = { ...props, alert: dashboardAlert };
+
+ render(<AlertReportModal {...editProps} />, {
+ useRedux: true,
+ });
+
+ // Wait for resource to load and all validation to pass
+ await waitFor(
+ () => {
+ expect(
+ screen.queryAllByRole('img', { name: /check-circle/i }),
+ ).toHaveLength(5);
+ },
+ { timeout: 5000 },
+ );
+
+ await waitFor(() => {
+ expect(screen.getByRole('button', { name: /save/i })).toBeEnabled();
+ });
+ userEvent.click(screen.getByRole('button', { name: /save/i }));
+
+ await waitFor(() => {
+ const calls = fetchMock.callHistory.calls('put-dashboard-payload');
+ expect(calls.length).toBeGreaterThan(0);
+ });
+
+ const calls = fetchMock.callHistory.calls('put-dashboard-payload');
+ const body = JSON.parse(calls[calls.length - 1].options.body as string);
+
+ // Dashboard payload: dashboard field is the numeric ID, chart is null
+ expect(body.dashboard).toBe(1);
+ expect(body.chart).toBeNull();
+ expect(body.name).toBe('Test Alert');
+ expect(body.recipients).toBeDefined();
+
+ fetchMock.removeRoute('put-dashboard-payload');
+});
+
+// --------------- Existing filter reappear test ------------------
+
test('filter reappears in dropdown after clearing with X icon', async () => {
const chartDataEndpoint = 'glob:*/api/v1/chart/data*';
@@ -867,19 +1697,39 @@ test('filter reappears in dropdown after clearing with X icon', async () => {
userEvent.click(screen.getByTestId('contents-panel'));
await screen.findByText(/test dashboard/i);
+ // Wait for tabs endpoint to be called so filter options are populated before
+ // opening the dropdown — in CI the async chain (fetch report → set dashboard →
+ // fetch tabs → set filter options) can lag behind the UI interactions.
+ await waitFor(
+ () => {
+ const tabsCalls = fetchMock.callHistory.calls(tabsEndpoint);
+ expect(tabsCalls.length).toBeGreaterThan(0);
+ },
+ { timeout: 5000 },
+ );
+
const filterDropdown = screen.getByRole('combobox', {
name: /select filter/i,
});
expect(filterDropdown).toBeInTheDocument();
- userEvent.click(filterDropdown);
+ // Value selector should be disabled before any filter is selected
+ const valueSelect = screen.getByRole('combobox', { name: /select value/i });
+ expect(valueSelect.closest('.ant-select')).toHaveClass('ant-select-disabled');
- const filterOption = await waitFor(() => {
- const virtualList = document.querySelector('.rc-virtual-list');
- return within(virtualList as HTMLElement).getByText('Test Filter 1');
- });
+ // Open dropdown and find filter option. Use waitFor to retry mouseDown
+ // because Ant Design Select may not open on the first attempt in CI.
+ let filterOption: HTMLElement;
+ await waitFor(
+ () => {
+ fireEvent.mouseDown(filterDropdown);
+ filterOption = screen.getByText('Test Filter 1');
+ expect(filterOption).toBeInTheDocument();
+ },
+ { timeout: 10000 },
+ );
- userEvent.click(filterOption);
+ userEvent.click(filterOption!);
await waitFor(() => {
const selectionItem = document.querySelector(
@@ -888,6 +1738,13 @@ test('filter reappears in dropdown after clearing with X icon', async () => {
expect(selectionItem).toBeInTheDocument();
});
+ // After selecting a filter, getChartDataRequest resolves and value selector becomes enabled
+ await waitFor(() => {
+ expect(valueSelect.closest('.ant-select')).not.toHaveClass(
+ 'ant-select-disabled',
+ );
+ });
+
const selectContainer = filterDropdown.closest('.ant-select');
const clearIcon = selectContainer?.querySelector(
@@ -903,14 +1760,24 @@ test('filter reappears in dropdown after clearing with X icon', async () => {
expect(selectionItem).not.toBeInTheDocument();
});
- userEvent.click(filterDropdown);
+ // After clearing, value selector should be disabled again (optionFilterValues reset)
await waitFor(() => {
- const virtualList = document.querySelector('.rc-virtual-list');
- expect(
- within(virtualList as HTMLElement).getByText('Test Filter 1'),
- ).toBeInTheDocument();
+ expect(valueSelect.closest('.ant-select')).toHaveClass(
+ 'ant-select-disabled',
+ );
});
-});
+
+ // Re-open the dropdown — the filter should reappear as an option.
+ // Use waitFor to retry the mouseDown + text check because Ant Design
+ // may still be processing the clear event internally.
+ await waitFor(
+ () => {
+ fireEvent.mouseDown(filterDropdown);
+ expect(screen.getByText('Test Filter 1')).toBeInTheDocument();
+ },
+ { timeout: 10000 },
+ );
+}, 45000);
const setupAnchorMocks = (
 nativeFilters: Record<string, unknown>,
@@ -932,7 +1799,7 @@ const setupAnchorMocks = (
const tabs = tabsOverride ?? defaultTabs;
// Clear call history so waitFor assertions don't match calls from prior tests.
- fetchMock.callHistory.clear();
+ fetchMock.clearHistory();
// Only replace the named routes that need anchor-specific overrides;
// unnamed related-endpoint routes (owners, database, etc.) stay intact.
@@ -1101,6 +1968,17 @@ test('stale JSON array anchor is cleared without crash or toast', async () => {
).toBe(true);
});
+ // Wait for the anchor state to be cleared. The .then() callback calls
+ // updateAnchorState(undefined) after finding that not all elements in
+ // the JSON array anchor are valid tabs. When the anchor is cleared,
+ // the tab selector shows its placeholder text instead of a selected value.
+ await waitFor(
+ () => {
+ expect(screen.getByText(/select a tab/i)).toBeInTheDocument();
+ },
+ { timeout: 10000 },
+ );
+
// No error toast dispatched (the .then() handler ran without crashing)
 const toasts = (store.getState() as Record<string, unknown>)
.messageToasts as { text: string }[];
@@ -1340,15 +2218,33 @@ test('anchor tab with scoped filters loads filter options correctly', async () =
userEvent.click(screen.getByTestId('contents-panel'));
await screen.findByText(/test dashboard/i);
+ // Wait for the tabs fetch to complete so filter options are populated
+ await waitFor(
+ () => {
+ expect(
+ fetchMock.callHistory
+ .calls()
+ .some(c => c.url.includes('/dashboard/1/tabs')),
+ ).toBe(true);
+ },
+ { timeout: 5000 },
+ );
+
const filterDropdown = screen.getByRole('combobox', {
name: /select filter/i,
});
- userEvent.click(filterDropdown);
-
- const filterOption = await screen.findByRole('option', {
- name: /Tab Scoped Filter/,
- });
- expect(filterOption).toBeInTheDocument();
+ // Use waitFor with mouseDown retry — the dropdown may not open or
+ // populate on the first attempt if the .then() callback hasn't
+ // finished setting filter options yet.
+ await waitFor(
+ () => {
+ fireEvent.mouseDown(filterDropdown);
+ expect(
+ screen.getByRole('option', { name: /Tab Scoped Filter/ }),
+ ).toBeInTheDocument();
+ },
+ { timeout: 10000 },
+ );
  const toasts = (store.getState() as Record<string, unknown>)
.messageToasts as { text: string }[];
@@ -1488,3 +2384,288 @@ test('tabs metadata overwrites seeded filter options', async () => {
within(selectContainer).queryByTitle('Country'),
).not.toBeInTheDocument();
});
+
+test('selecting filter triggers chart data request with correct params', async () => {
+ mockGetChartDataRequest.mockReset();
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(tabsEndpoint, tabsWithFilters, { name: tabsEndpoint });
+
+ mockGetChartDataRequest.mockResolvedValue({
+ json: { result: [{ data: [{ country: 'US' }, { country: 'UK' }] }] },
+ });
+
+  render(<AlertReportModal {...generateMockedProps(true)} />, {
+ useRedux: true,
+ });
+
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByText(/test dashboard/i);
+
+ // Wait for filter dropdown to be available
+ const filterDropdown = await waitFor(() =>
+ screen.getByRole('combobox', { name: /select filter/i }),
+ );
+
+ // Select the Country Filter using comboboxSelect pattern
+ await comboboxSelect(filterDropdown, 'Country Filter', () =>
+ document.querySelector(
+ '.ant-select-selection-item[title="Country Filter"]',
+ ),
+ );
+
+ // getChartDataRequest should have been called for filter values
+ await waitFor(() => {
+ expect(mockGetChartDataRequest).toHaveBeenCalledTimes(1);
+ });
+
+ // Verify it was called with correct datasource and groupby
+ const callArgs = mockGetChartDataRequest.mock.calls[0][0];
+ expect(callArgs.formData.groupby).toEqual(['country']);
+ expect(callArgs.formData.datasource).toBe('1__table');
+
+ mockGetChartDataRequest.mockReset();
+});
+
+test('selected filter excluded from other row dropdowns', async () => {
+ mockGetChartDataRequest.mockReset();
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(tabsEndpoint, tabsWithFilters, { name: tabsEndpoint });
+
+ mockGetChartDataRequest.mockResolvedValue({
+ json: { result: [{ data: [{ country: 'US' }] }] },
+ });
+
+  render(<AlertReportModal {...generateMockedProps(true)} />, {
+ useRedux: true,
+ });
+
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByText(/test dashboard/i);
+
+ // Wait for tabs endpoint to complete so filter options are populated
+ await waitFor(
+ () => {
+ expect(fetchMock.callHistory.calls(tabsEndpoint).length).toBeGreaterThan(
+ 0,
+ );
+ },
+ { timeout: 5000 },
+ );
+
+ // Wait for filter dropdown
+ const filterDropdown = await waitFor(() =>
+ screen.getByRole('combobox', { name: /select filter/i }),
+ );
+
+ // Select Country Filter in row 1
+ await comboboxSelect(filterDropdown, 'Country Filter', () =>
+ document.querySelector(
+ '.ant-select-selection-item[title="Country Filter"]',
+ ),
+ );
+
+ // Wait for getChartDataRequest to complete AND state update to propagate.
+ // The value dropdown becomes non-disabled once optionFilterValues is populated.
+ await waitFor(() => {
+ expect(mockGetChartDataRequest).toHaveBeenCalled();
+ });
+ await waitFor(
+ () => {
+ const valueSelects = screen.queryAllByRole('combobox', {
+ name: /select value/i,
+ });
+ expect(valueSelects.length).toBeGreaterThan(0);
+ },
+ { timeout: 5000 },
+ );
+
+ // Add second filter row
+ const addFilterButton = screen.getByText(/apply another dashboard filter/i);
+ userEvent.click(addFilterButton);
+
+ // Wait for second row
+ await waitFor(() => {
+ expect(
+ screen.getAllByRole('combobox', { name: /select filter/i }),
+ ).toHaveLength(2);
+ });
+
+ // Open filter dropdown in row 2 — use fireEvent.mouseDown because
+ // Ant Design Select opens on mouseDown, not click.
+ const filterDropdowns = screen.getAllByRole('combobox', {
+ name: /select filter/i,
+ });
+ fireEvent.mouseDown(filterDropdowns[1]);
+
+ // Country Filter should be excluded (dedup), City Filter should remain.
+ // Scope to the last dropdown popup to avoid matching selected items in row 1.
+ await waitFor(
+ () => {
+ const dropdowns = document.querySelectorAll('.ant-select-dropdown');
+ const lastDropdown = dropdowns[dropdowns.length - 1];
+ expect(
+ within(lastDropdown as HTMLElement).queryByText('Country Filter'),
+ ).not.toBeInTheDocument();
+ expect(
+ within(lastDropdown as HTMLElement).getByText('City Filter'),
+ ).toBeInTheDocument();
+ },
+ { timeout: 10000 },
+ );
+}, 60000);
+
+test('invalid CC email blocks submit', async () => {
+  render(<AlertReportModal {...generateMockedProps(true, true)} />, {
+ useRedux: true,
+ });
+
+ // Wait for validation to fully propagate (fetch → state → validate → enforce)
+ await screen.findByText('Edit alert');
+ await waitFor(
+ () => {
+ expect(screen.getByRole('button', { name: /save/i })).toBeEnabled();
+ },
+ { timeout: 5000 },
+ );
+
+ // Open notification panel and show CC field
+ userEvent.click(screen.getByTestId('notification-method-panel'));
+ const addCcButton = await screen.findByText(/Add CC Recipients/i);
+ userEvent.click(addCcButton);
+
+ // Type invalid email in CC field
+ const ccInput = await screen.findByTestId('cc');
+ userEvent.type(ccInput, 'not-an-email');
+ fireEvent.blur(ccInput);
+
+ // Save should now be disabled due to invalid email format
+ await waitFor(() => {
+ expect(screen.getByRole('button', { name: /save/i })).toBeDisabled();
+ });
+});
+
+test('invalid BCC email blocks submit', async () => {
+  render(<AlertReportModal {...generateMockedProps(true, true)} />, {
+ useRedux: true,
+ });
+
+ // Wait for validation to fully propagate (fetch → state → validate → enforce)
+ await screen.findByText('Edit alert');
+ await waitFor(
+ () => {
+ expect(screen.getByRole('button', { name: /save/i })).toBeEnabled();
+ },
+ { timeout: 5000 },
+ );
+
+ // Open notification panel and show BCC field
+ userEvent.click(screen.getByTestId('notification-method-panel'));
+ const addBccButton = await screen.findByText(/Add BCC Recipients/i);
+ userEvent.click(addBccButton);
+
+ // Type invalid email in BCC field
+ const bccInput = await screen.findByTestId('bcc');
+ userEvent.type(bccInput, 'not-an-email');
+ fireEvent.blur(bccInput);
+
+ // Save should now be disabled due to invalid email format
+ await waitFor(() => {
+ expect(screen.getByRole('button', { name: /save/i })).toBeDisabled();
+ });
+});
+
+test('invalid saved anchor is reset on dashboard load', async () => {
+ fetchMock.removeRoute(tabsEndpoint);
+ fetchMock.get(tabsEndpoint, tabsWithFilters, { name: tabsEndpoint });
+
+ const props = generateMockedProps(true, true);
+ const editProps = {
+ ...props,
+ alert: { ...validAlert, id: 7 },
+ };
+
+  render(<AlertReportModal {...editProps} />, {
+ useRedux: true,
+ });
+
+ userEvent.click(screen.getByTestId('contents-panel'));
+ await screen.findByText(/test dashboard/i);
+
+ // Wait for dashboard tabs to load
+ await waitFor(() => {
+ expect(fetchMock.callHistory.calls(tabsEndpoint).length).toBeGreaterThan(0);
+ });
+
+ // The saved anchor 'TAB_999' is NOT in the dashboard's tabs (TAB_1, TAB_2),
+ // so it should be reset to undefined. Tab selector shows placeholder.
+ await waitFor(() => {
+ expect(screen.getByText(/select a tab/i)).toBeInTheDocument();
+ });
+
+ // TAB_999 should NOT appear as a selected value anywhere
+ expect(screen.queryAllByTitle('TAB_999')).toHaveLength(0);
+});
+
+test('clearing notification recipients disables submit and prevents API call', async () => {
+ fetchMock.put(
+ 'glob:*/api/v1/report/2',
+ { id: 2, result: {} },
+ { name: 'put-no-recipients' },
+ );
+
+  render(<AlertReportModal {...generateMockedProps(true, true)} />, {
+ useRedux: true,
+ });
+
+ // Wait for all validation to pass (5 checkmarks = fully valid alert)
+ await waitFor(() => {
+ expect(
+ screen.queryAllByRole('img', { name: /check-circle/i }),
+ ).toHaveLength(5);
+ });
+
+ // Save should be enabled initially
+ expect(screen.getByRole('button', { name: /save/i })).toBeEnabled();
+
+ // Open notification panel and clear the recipients field
+ userEvent.click(screen.getByTestId('notification-method-panel'));
+ const recipientInput = await screen.findByTestId('recipients');
+ userEvent.clear(recipientInput);
+ fireEvent.blur(recipientInput);
+
+ // Save should be disabled — empty recipients block submission
+ await waitFor(() => {
+ expect(screen.getByRole('button', { name: /save/i })).toBeDisabled();
+ });
+
+ // No PUT was emitted — the disabled button prevented any payload from being sent.
+ // This confirms that a payload with empty recipients is never transmitted.
+ expect(fetchMock.callHistory.calls('put-no-recipients')).toHaveLength(0);
+
+ fetchMock.removeRoute('put-no-recipients');
+});
+
+test('modal reopen resets local state', async () => {
+ const props = generateMockedProps(true, false, true);
+
+  const { unmount } = render(<AlertReportModal {...props} />, {
+ useRedux: true,
+ });
+
+ // Type a name to dirty the form
+ const nameInput = screen.getByPlaceholderText(/enter report name/i);
+ await userEvent.type(nameInput, 'Temporary Report');
+ expect(nameInput).toHaveValue('Temporary Report');
+
+ // Click Cancel
+ userEvent.click(screen.getByRole('button', { name: /cancel/i }));
+
+ // Unmount and remount to simulate reopening
+ unmount();
+  render(<AlertReportModal {...props} />, { useRedux: true });
+
+ // Fresh mount should have empty name
+ await waitFor(() => {
+ expect(screen.getByPlaceholderText(/enter report name/i)).toHaveValue('');
+ });
+});
diff --git a/superset-frontend/src/features/alerts/components/AlertReportCronScheduler.test.tsx b/superset-frontend/src/features/alerts/components/AlertReportCronScheduler.test.tsx
new file mode 100644
index 00000000000..7f4c55aeec8
--- /dev/null
+++ b/superset-frontend/src/features/alerts/components/AlertReportCronScheduler.test.tsx
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import { useState } from 'react';
+import {
+ render,
+ screen,
+ fireEvent,
+ waitFor,
+} from 'spec/helpers/testing-library';
+import { AlertReportCronScheduler } from './AlertReportCronScheduler';
+
+const defaultProps = {
+ value: '0 12 * * 1',
+ onChange: jest.fn(),
+};
+
+beforeEach(() => {
+ defaultProps.onChange = jest.fn();
+});
+
+test('renders CronPicker by default (picker mode)', () => {
+ render();
+ expect(screen.getByText('Schedule type')).toBeInTheDocument();
+ expect(screen.getByText('Schedule')).toBeInTheDocument();
+ // CronPicker renders combobox elements; CRON text input does not
+ expect(
+ screen.queryByPlaceholderText('CRON expression'),
+ ).not.toBeInTheDocument();
+});
+
+async function switchToCronInputMode() {
+ const scheduleTypeSelect = screen.getByRole('combobox', {
+ name: /Schedule type/i,
+ });
+ fireEvent.mouseDown(scheduleTypeSelect);
+ await waitFor(() => {
+ expect(screen.getByText('CRON Schedule')).toBeInTheDocument();
+ });
+ fireEvent.click(screen.getByText('CRON Schedule'));
+}
+
+test('switches to CRON input mode and shows text input', async () => {
+ render();
+
+ await switchToCronInputMode();
+
+ await waitFor(() => {
+ expect(screen.getByPlaceholderText('CRON expression')).toBeInTheDocument();
+ });
+});
+
+// Controlled wrapper: the component is fully controlled (value from props),
+// so blur/enter tests need a parent that updates value on onChange.
+function ControlledScheduler({
+ initialValue,
+ onChangeSpy,
+}: {
+ initialValue: string;
+ onChangeSpy: jest.Mock;
+}) {
+ const [value, setValue] = useState(initialValue);
+ return (
+    <AlertReportCronScheduler
+      value={value}
+      onChange={v => {
+        setValue(v);
+        onChangeSpy(v);
+      }}
+    />
+ );
+}
+
+test('calls onChange on blur in CRON input mode', async () => {
+ const onChangeSpy = jest.fn();
+  render(
+    <ControlledScheduler initialValue="0 12 * * 1" onChangeSpy={onChangeSpy} />,
+  );
+
+ await switchToCronInputMode();
+
+ const input = await screen.findByPlaceholderText('CRON expression');
+ fireEvent.change(input, { target: { value: '*/5 * * * *' } });
+
+ // Clear spy so we only assert the blur-specific call
+ onChangeSpy.mockClear();
+ fireEvent.blur(input);
+
+ expect(onChangeSpy).toHaveBeenCalledTimes(1);
+ expect(onChangeSpy).toHaveBeenCalledWith('*/5 * * * *');
+});
+
+test('calls onChange on Enter key press in CRON input mode', async () => {
+ const onChangeSpy = jest.fn();
+  render(
+    <ControlledScheduler initialValue="0 12 * * 1" onChangeSpy={onChangeSpy} />,
+  );
+
+ await switchToCronInputMode();
+
+ const input = await screen.findByPlaceholderText('CRON expression');
+ fireEvent.change(input, { target: { value: '0 9 * * 1-5' } });
+
+ // Clear spy so we only assert the Enter-specific call
+ onChangeSpy.mockClear();
+ fireEvent.keyDown(input, { key: 'Enter', code: 'Enter' });
+
+ expect(onChangeSpy).toHaveBeenCalledTimes(1);
+ expect(onChangeSpy).toHaveBeenCalledWith('0 9 * * 1-5');
+});
diff --git a/superset-frontend/src/features/alerts/components/NumberInput.test.tsx b/superset-frontend/src/features/alerts/components/NumberInput.test.tsx
new file mode 100644
index 00000000000..4ce6a366d30
--- /dev/null
+++ b/superset-frontend/src/features/alerts/components/NumberInput.test.tsx
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import { render, screen, fireEvent } from 'spec/helpers/testing-library';
+import NumberInput from './NumberInput';
+
+const defaultProps = {
+ timeUnit: 'seconds',
+ min: 0,
+ name: 'timeout',
+ value: '30',
+ placeholder: 'Enter value',
+ onChange: jest.fn(),
+};
+
+test('renders value with timeUnit suffix when not focused', () => {
+ render();
+ const input = screen.getByPlaceholderText('Enter value');
+ expect(input).toHaveValue('30 seconds');
+});
+
+test('strips suffix on focus and restores on blur', () => {
+ render();
+ const input = screen.getByPlaceholderText('Enter value');
+
+ fireEvent.focus(input);
+ expect(input).toHaveValue('30');
+
+ fireEvent.blur(input);
+ expect(input).toHaveValue('30 seconds');
+});
+
+test('renders empty string when value is falsy', () => {
+  render(<NumberInput {...defaultProps} value="" />);
+ const input = screen.getByPlaceholderText('Enter value');
+ expect(input).toHaveValue('');
+});
+
+test('renders empty string when value is zero', () => {
+  render(<NumberInput {...defaultProps} value={0} />);
+ const input = screen.getByPlaceholderText('Enter value');
+ expect(input).toHaveValue('');
+});
+
+test('calls onChange when input changes', () => {
+ const onChange = jest.fn();
+  render(<NumberInput {...defaultProps} onChange={onChange} />);
+ const input = screen.getByPlaceholderText('Enter value');
+
+ fireEvent.change(input, { target: { value: '60' } });
+ expect(onChange).toHaveBeenCalled();
+});
diff --git a/superset-frontend/src/features/reports/ReportModal/ReportModal.test.tsx b/superset-frontend/src/features/reports/ReportModal/ReportModal.test.tsx
index c07381817b7..5fad71c7ad6 100644
--- a/superset-frontend/src/features/reports/ReportModal/ReportModal.test.tsx
+++ b/superset-frontend/src/features/reports/ReportModal/ReportModal.test.tsx
@@ -22,9 +22,10 @@ import {
screen,
userEvent,
waitFor,
+ createStore,
} from 'spec/helpers/testing-library';
+import reducerIndex from 'spec/helpers/reducerIndex';
import { FeatureFlag, VizType, isFeatureEnabled } from '@superset-ui/core';
-import * as actions from 'src/features/reports/ReportModal/actions';
import ReportModal from '.';
const REPORT_ENDPOINT = 'glob:*/api/v1/report*';
@@ -56,115 +57,260 @@ jest.mock('@superset-ui/core', () => ({
}));
const mockedIsFeatureEnabled = isFeatureEnabled as jest.Mock;
-// eslint-disable-next-line no-restricted-globals -- TODO: Migrate from describe blocks
-describe('Email Report Modal', () => {
- beforeEach(() => {
- mockedIsFeatureEnabled.mockImplementation(
- featureFlag => featureFlag === FeatureFlag.AlertReports,
- );
-    render(<ReportModal {...defaultProps} />, { useRedux: true });
- });
- test('inputs respond correctly', () => {
- // ----- Report name textbox
- // Initial value
- const reportNameTextbox = screen.getByTestId('report-name-test');
- expect(reportNameTextbox).toHaveDisplayValue('Weekly Report');
- // Type in the textbox and assert that it worked
- userEvent.clear(reportNameTextbox);
- userEvent.type(reportNameTextbox, 'Report name text test');
- expect(reportNameTextbox).toHaveDisplayValue('Report name text test');
-
- // ----- Report description textbox
- // Initial value
- const reportDescriptionTextbox = screen.getByTestId(
- 'report-description-test',
- );
- expect(reportDescriptionTextbox).toHaveDisplayValue('');
- // Type in the textbox and assert that it worked
- userEvent.type(reportDescriptionTextbox, 'Report description text test');
- expect(reportDescriptionTextbox).toHaveDisplayValue(
- 'Report description text test',
- );
-
- // ----- Crontab
- const crontabInputs = screen.getAllByRole('combobox');
- expect(crontabInputs).toHaveLength(5);
- });
-
- test('does not allow user to create a report without a name', () => {
- // Grab name textbox and add button
- const reportNameTextbox = screen.getByTestId('report-name-test');
- const addButton = screen.getByRole('button', { name: /add/i });
-
- // Add button should be enabled while name textbox has text
- expect(reportNameTextbox).toHaveDisplayValue('Weekly Report');
- expect(addButton).toBeEnabled();
-
- // Clear the text from the name textbox
- userEvent.clear(reportNameTextbox);
-
- // Add button should now be disabled, blocking user from creation
- expect(reportNameTextbox).toHaveDisplayValue('');
- expect(addButton).toBeDisabled();
- });
-
- // eslint-disable-next-line no-restricted-globals -- TODO: Migrate from describe blocks
- describe('Email Report Modal', () => {
- let dispatch: any;
-
- beforeEach(async () => {
- dispatch = jest.fn();
- });
-
- test('creates a new email report', async () => {
- // ---------- Render/value setup ----------
- const reportValues = {
- id: 1,
- result: {
- active: true,
- creation_method: 'dashboards',
- crontab: '0 12 * * 1',
- dashboard: 1,
- name: 'Weekly Report',
- owners: [1],
- recipients: [
- {
- recipient_config_json: {
- target: 'test@test.com',
- },
- type: 'Email',
- },
- ],
- type: 'Report',
- },
- };
- // This is needed to structure the reportValues to match the fetchMock return
- const stringyReportValues = `{"id":1,"result":{"active":true,"creation_method":"dashboards","crontab":"0 12 * * 1","dashboard":${1},"name":"Weekly Report","owners":[${1}],"recipients":[{"recipient_config_json":{"target":"test@test.com"},"type":"Email"}],"type":"Report"}}`;
- // Watch for report POST
- fetchMock.post(REPORT_ENDPOINT, reportValues);
-
- // Click "Add" button to create a new email report
- const addButton = screen.getByRole('button', { name: /add/i });
- await waitFor(() => userEvent.click(addButton));
-
- // Mock addReport from Redux
- const makeRequest = () => {
- const request = actions.addReport(reportValues);
- return request(dispatch);
- };
-
- await makeRequest();
-
- // 🐞 ----- There are 2 POST calls at this point ----- 🐞
-
- // addReport's mocked POST return should match the mocked values
- expect(fetchMock.callHistory.lastCall()?.options?.body).toEqual(
- stringyReportValues,
- );
- expect(dispatch).toHaveBeenCalledTimes(2);
- const reportCalls = fetchMock.callHistory.calls(REPORT_ENDPOINT);
- expect(reportCalls).toHaveLength(2);
- });
- });
+beforeEach(() => {
+ mockedIsFeatureEnabled.mockImplementation(
+ featureFlag => featureFlag === FeatureFlag.AlertReports,
+ );
+});
+
+test('inputs respond correctly', () => {
+  render(<ReportModal {...defaultProps} />, { useRedux: true });
+ // ----- Report name textbox
+ const reportNameTextbox = screen.getByTestId('report-name-test');
+ expect(reportNameTextbox).toHaveDisplayValue('Weekly Report');
+ userEvent.clear(reportNameTextbox);
+ userEvent.type(reportNameTextbox, 'Report name text test');
+ expect(reportNameTextbox).toHaveDisplayValue('Report name text test');
+
+ // ----- Report description textbox
+ const reportDescriptionTextbox = screen.getByTestId(
+ 'report-description-test',
+ );
+ expect(reportDescriptionTextbox).toHaveDisplayValue('');
+ userEvent.type(reportDescriptionTextbox, 'Report description text test');
+ expect(reportDescriptionTextbox).toHaveDisplayValue(
+ 'Report description text test',
+ );
+
+ // ----- Crontab
+ const crontabInputs = screen.getAllByRole('combobox');
+ expect(crontabInputs).toHaveLength(5);
+});
+
+test('does not allow user to create a report without a name', () => {
+  render(<ReportModal {...defaultProps} />, { useRedux: true });
+ const reportNameTextbox = screen.getByTestId('report-name-test');
+ const addButton = screen.getByRole('button', { name: /add/i });
+
+ expect(reportNameTextbox).toHaveDisplayValue('Weekly Report');
+ expect(addButton).toBeEnabled();
+
+ userEvent.clear(reportNameTextbox);
+
+ expect(reportNameTextbox).toHaveDisplayValue('');
+ expect(addButton).toBeDisabled();
+});
+
+test('creates a new email report via modal Add button', async () => {
+ fetchMock.post(
+ REPORT_ENDPOINT,
+ { id: 1, result: {} },
+ { name: 'post-report' },
+ );
+
+  render(<ReportModal {...defaultProps} />, { useRedux: true });
+
+ const addButton = screen.getByRole('button', { name: /add/i });
+ await waitFor(() => userEvent.click(addButton));
+
+ // Verify exactly one POST from the modal submit path
+ await waitFor(() => {
+ const postCalls = fetchMock.callHistory.calls('post-report');
+ expect(postCalls).toHaveLength(1);
+ });
+
+ const postCalls = fetchMock.callHistory.calls('post-report');
+ const body = JSON.parse(postCalls[0].options.body as string);
+ expect(body.name).toBe('Weekly Report');
+ expect(body.type).toBe('Report');
+ expect(body.creation_method).toBe('dashboards');
+ expect(body.crontab).toBeDefined();
+ expect(body.recipients).toBeDefined();
+ expect(body.recipients[0].type).toBe('Email');
+
+ fetchMock.removeRoute('post-report');
+});
+
+test('text-based chart hides screenshot width and shows message content', () => {
+ // Table is text-based: should show message content but hide custom width
+ const textChartProps = {
+ ...defaultProps,
+ dashboardId: undefined,
+ chart: { id: 1, sliceFormData: { viz_type: VizType.Table } },
+ chartName: 'My Table Chart',
+ creationMethod: 'charts' as const,
+ };
+  render(<ReportModal {...textChartProps} />, { useRedux: true });
+
+ // Message content section should be visible
+ expect(screen.getByText('Message content')).toBeInTheDocument();
+ expect(screen.getByText(/Text embedded in email/i)).toBeInTheDocument();
+
+ // Screenshot width should NOT be visible for text-based chart
+ expect(screen.queryByText('Screenshot width')).not.toBeInTheDocument();
+});
+
+test('non-text chart shows screenshot width and message content', () => {
+ const lineChartProps = {
+ ...defaultProps,
+ dashboardId: undefined,
+ chart: { id: 1, sliceFormData: { viz_type: VizType.Line } },
+ chartName: 'My Line Chart',
+ creationMethod: 'charts' as const,
+ };
+  render(<ReportModal {...lineChartProps} />, { useRedux: true });
+
+ // Both message content and screenshot width should be visible
+ expect(screen.getByText('Message content')).toBeInTheDocument();
+ expect(screen.getByText('Screenshot width')).toBeInTheDocument();
+});
+
+test('dashboard report hides message content section', () => {
+ const dashboardProps = {
+ ...defaultProps,
+ chart: undefined,
+ dashboardName: 'My Dashboard',
+ };
+  render(<ReportModal {...dashboardProps} />, { useRedux: true });
+
+ // Message content (radio group) should NOT be visible for dashboard
+ expect(screen.queryByText('Message content')).not.toBeInTheDocument();
+ // Screenshot width SHOULD be visible
+ expect(screen.getByText('Screenshot width')).toBeInTheDocument();
+});
+
+test('renders edit mode when report exists in store', () => {
+ const existingReport = {
+ id: 42,
+ name: 'Existing Dashboard Report',
+ description: 'An existing report',
+ crontab: '0 9 * * 1',
+ creation_method: 'dashboards',
+ report_format: 'PNG',
+ timezone: 'America/New_York',
+ active: true,
+ type: 'Report',
+ dashboard: 1,
+ owners: [1],
+ recipients: [
+ {
+ recipient_config_json: { target: 'test@test.com' },
+ type: 'Email',
+ },
+ ],
+ };
+ const store = createStore(
+ {
+ reports: {
+ dashboards: { 1: existingReport },
+ },
+ },
+ reducerIndex,
+ );
+
+  render(<ReportModal {...defaultProps} />, { useRedux: true, store });
+
+ // Edit mode title
+ expect(screen.getByText('Edit email report')).toBeInTheDocument();
+ // Report name populated from store
+ expect(screen.getByTestId('report-name-test')).toHaveDisplayValue(
+ 'Existing Dashboard Report',
+ );
+ // Save button instead of Add
+ expect(screen.getByRole('button', { name: /save/i })).toBeInTheDocument();
+});
+
+test('edit mode dispatches editReport via PUT on save', async () => {
+ const existingReport = {
+ id: 42,
+ name: 'Existing Report',
+ description: '',
+ crontab: '0 12 * * 1',
+ creation_method: 'dashboards',
+ report_format: 'PNG',
+ timezone: 'America/New_York',
+ active: true,
+ type: 'Report',
+ dashboard: 1,
+ owners: [1],
+ recipients: [
+ {
+ recipient_config_json: { target: 'test@test.com' },
+ type: 'Email',
+ },
+ ],
+ };
+ const store = createStore(
+ {
+ reports: {
+ dashboards: { 1: existingReport },
+ },
+ },
+ reducerIndex,
+ );
+
+ fetchMock.put(
+ 'glob:*/api/v1/report/42',
+ { id: 42, result: {} },
+ {
+ name: 'put-report-42',
+ },
+ );
+
+  render(<ReportModal {...defaultProps} />, { useRedux: true, store });
+
+ expect(screen.getByText('Edit email report')).toBeInTheDocument();
+ const saveButton = screen.getByRole('button', { name: /save/i });
+ await waitFor(() => userEvent.click(saveButton));
+
+ await waitFor(() => {
+ const calls = fetchMock.callHistory.calls('put-report-42');
+ expect(calls.length).toBeGreaterThan(0);
+ });
+
+ const calls = fetchMock.callHistory.calls('put-report-42');
+ const body = JSON.parse(calls[calls.length - 1].options.body as string);
+
+ // Pin critical payload fields to catch regressions
+ expect(body.type).toBe('Report');
+ expect(body.name).toBe('Existing Report');
+ expect(body.crontab).toBe('0 12 * * 1');
+ expect(body.report_format).toBe('PNG');
+ expect(body.dashboard).toBe(1);
+ expect(body.recipients).toBeDefined();
+ expect(body.recipients[0].type).toBe('Email');
+
+ fetchMock.removeRoute('put-report-42');
+});
+
+test('submit failure dispatches danger toast and keeps modal open', async () => {
+ fetchMock.post(REPORT_ENDPOINT, 500, { name: 'post-fail' });
+ const onHide = jest.fn();
+
+ const store = createStore({}, reducerIndex);
+  render(<ReportModal {...defaultProps} onHide={onHide} />, {
+ useRedux: true,
+ store,
+ });
+
+ const addButton = screen.getByRole('button', { name: /add/i });
+ await waitFor(() => userEvent.click(addButton));
+
+ // The addReport action catches 500 errors, dispatches a danger toast, and re-throws
+ await waitFor(() => {
+ const toasts = (store.getState() as any).messageToasts;
+ expect(toasts.length).toBeGreaterThan(0);
+ expect(
+ toasts.some((t: { text: string }) =>
+ t.text.includes('Failed to create report'),
+ ),
+ ).toBe(true);
+ });
+
+ // Modal stays open — onHide should NOT have been called
+ expect(onHide).not.toHaveBeenCalled();
+ expect(screen.getByText('Schedule a new email report')).toBeInTheDocument();
+
+ fetchMock.removeRoute('post-fail');
});
diff --git a/superset-frontend/src/features/reports/ReportModal/actions.test.ts b/superset-frontend/src/features/reports/ReportModal/actions.test.ts
new file mode 100644
index 00000000000..4c3d2f87888
--- /dev/null
+++ b/superset-frontend/src/features/reports/ReportModal/actions.test.ts
@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import fetchMock from 'fetch-mock';
+import {
+ addReport,
+ editReport,
+ deleteActiveReport,
+ fetchUISpecificReport,
+ ADD_REPORT,
+ EDIT_REPORT,
+ DELETE_REPORT,
+ SET_REPORT,
+} from './actions';
+
+const REPORT_ENDPOINT = 'glob:*/api/v1/report/*';
+const REPORT_POST_ENDPOINT = 'glob:*/api/v1/report/';
+
+afterEach(() => {
+ fetchMock.clearHistory().removeRoutes();
+});
+
+test('addReport dispatches ADD_REPORT and success toast on success', async () => {
+ const jsonResponse = { id: 1, result: { name: 'New Report' } };
+ fetchMock.post(REPORT_POST_ENDPOINT, jsonResponse);
+ const dispatch = jest.fn();
+
+ await addReport({ name: 'New Report' })(dispatch);
+
+ const types = dispatch.mock.calls.map(([action]: any) => action.type);
+ expect(types).toContain(ADD_REPORT);
+ expect(types).toContain('ADD_TOAST');
+ const toastAction = dispatch.mock.calls.find(
+ ([a]: any) => a.type === 'ADD_TOAST',
+ )?.[0];
+ expect(toastAction.payload.toastType).toBe('SUCCESS_TOAST');
+});
+
+test('addReport dispatches danger toast on failure and rejects', async () => {
+ fetchMock.post(REPORT_POST_ENDPOINT, 500);
+ const dispatch = jest.fn();
+
+ await expect(
+ addReport({ name: 'Bad Report' })(dispatch),
+ ).rejects.toBeDefined();
+
+ const types = dispatch.mock.calls.map(([action]: any) => action.type);
+ expect(types).not.toContain(ADD_REPORT);
+ expect(types).toContain('ADD_TOAST');
+ const toastAction = dispatch.mock.calls.find(
+ ([a]: any) => a.type === 'ADD_TOAST',
+ )?.[0];
+ expect(toastAction.payload.toastType).toBe('DANGER_TOAST');
+});
+
+test('editReport dispatches EDIT_REPORT and success toast on success', async () => {
+ const jsonResponse = { id: 5, result: { name: 'Updated Report' } };
+ fetchMock.put(REPORT_ENDPOINT, jsonResponse);
+ const dispatch = jest.fn();
+
+ await editReport(5, { name: 'Updated Report' })(dispatch);
+
+ const types = dispatch.mock.calls.map(([action]: any) => action.type);
+ expect(types).toContain(EDIT_REPORT);
+ expect(types).toContain('ADD_TOAST');
+ const toastAction = dispatch.mock.calls.find(
+ ([a]: any) => a.type === 'ADD_TOAST',
+ )?.[0];
+ expect(toastAction.payload.toastType).toBe('SUCCESS_TOAST');
+});
+
+test('editReport dispatches danger toast on failure and rejects', async () => {
+ fetchMock.put(REPORT_ENDPOINT, 500);
+ const dispatch = jest.fn();
+
+ await expect(
+ editReport(5, { name: 'Bad Update' })(dispatch),
+ ).rejects.toBeDefined();
+
+ const types = dispatch.mock.calls.map(([action]: any) => action.type);
+ expect(types).not.toContain(EDIT_REPORT);
+ const toastAction = dispatch.mock.calls.find(
+ ([a]: any) => a.type === 'ADD_TOAST',
+ )?.[0];
+ expect(toastAction.payload.toastType).toBe('DANGER_TOAST');
+});
+
+test('deleteActiveReport dispatches DELETE_REPORT and success toast on success', async () => {
+ fetchMock.delete(REPORT_ENDPOINT, {});
+ const dispatch = jest.fn();
+ const report = {
+ id: 10,
+ name: 'To Delete',
+ creation_method: 'dashboards',
+ dashboard: 1,
+ };
+
+ await deleteActiveReport(report)(dispatch);
+
+ const types = dispatch.mock.calls.map(([action]: any) => action.type);
+ expect(types).toContain(DELETE_REPORT);
+ expect(types).toContain('ADD_TOAST');
+ const toastAction = dispatch.mock.calls.find(
+ ([a]: any) => a.type === 'ADD_TOAST',
+ )?.[0];
+ expect(toastAction.payload.toastType).toBe('SUCCESS_TOAST');
+});
+
+test('deleteActiveReport dispatches danger toast on failure', async () => {
+ fetchMock.delete(REPORT_ENDPOINT, 500);
+ const dispatch = jest.fn();
+ const report = { id: 10, name: 'To Delete' };
+
+ await deleteActiveReport(report)(dispatch);
+
+ const types = dispatch.mock.calls.map(([action]: any) => action.type);
+ expect(types).not.toContain(DELETE_REPORT);
+ const toastAction = dispatch.mock.calls.find(
+ ([a]: any) => a.type === 'ADD_TOAST',
+ )?.[0];
+ expect(toastAction.payload.toastType).toBe('DANGER_TOAST');
+});
+
+test('fetchUISpecificReport dispatches SET_REPORT on success', async () => {
+ const jsonResponse = { result: [{ id: 1, name: 'Dashboard Report' }] };
+ fetchMock.get('glob:*/api/v1/report/?q=*', jsonResponse);
+ const dispatch = jest.fn();
+
+ await fetchUISpecificReport({
+ userId: 1,
+ filterField: 'dashboard_id',
+ creationMethod: 'dashboards',
+ resourceId: 42,
+ })(dispatch);
+
+ const setAction = dispatch.mock.calls.find(
+ ([a]: any) => a.type === SET_REPORT,
+ )?.[0];
+ expect(setAction).toBeDefined();
+ expect(setAction.resourceId).toBe(42);
+ expect(setAction.creationMethod).toBe('dashboards');
+ expect(setAction.filterField).toBe('dashboard_id');
+});
+
+test('fetchUISpecificReport dispatches danger toast on failure', async () => {
+ fetchMock.get('glob:*/api/v1/report/?q=*', 500);
+ const dispatch = jest.fn();
+
+ await fetchUISpecificReport({
+ userId: 1,
+ filterField: 'chart_id',
+ creationMethod: 'charts',
+ resourceId: 10,
+ })(dispatch);
+
+ const types = dispatch.mock.calls.map(([action]: any) => action.type);
+ expect(types).not.toContain(SET_REPORT);
+ expect(types).toContain('ADD_TOAST');
+ const toastAction = dispatch.mock.calls.find(
+ ([a]: any) => a.type === 'ADD_TOAST',
+ )?.[0];
+ expect(toastAction.payload.toastType).toBe('DANGER_TOAST');
+});
diff --git a/superset-frontend/src/features/reports/ReportModal/actions.ts b/superset-frontend/src/features/reports/ReportModal/actions.ts
index db28f9360ec..6ae2c3e6ee3 100644
--- a/superset-frontend/src/features/reports/ReportModal/actions.ts
+++ b/superset-frontend/src/features/reports/ReportModal/actions.ts
@@ -169,8 +169,9 @@ export const addReport =
dispatch({ type: ADD_REPORT, json } as AddReportAction);
dispatch(addSuccessToast(t('The report has been created')));
})
- .catch(() => {
+ .catch(err => {
dispatch(addDangerToast(t('Failed to create report')));
+ throw err;
});
export const EDIT_REPORT = 'EDIT_REPORT' as const;
@@ -191,8 +192,9 @@ export const editReport =
dispatch({ type: EDIT_REPORT, json } as EditReportAction);
dispatch(addSuccessToast(t('Report updated')));
})
- .catch(() => {
+ .catch(err => {
dispatch(addDangerToast(t('Failed to update report')));
+ throw err;
});
export function toggleActive(report: ReportObject, isActive: boolean) {
diff --git a/superset-frontend/src/features/reports/ReportModal/reducer.test.ts b/superset-frontend/src/features/reports/ReportModal/reducer.test.ts
new file mode 100644
index 00000000000..51f39302c89
--- /dev/null
+++ b/superset-frontend/src/features/reports/ReportModal/reducer.test.ts
@@ -0,0 +1,214 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import reportsReducer, { ReportsState } from './reducer';
+import {
+ SET_REPORT,
+ ADD_REPORT,
+ EDIT_REPORT,
+ DELETE_REPORT,
+ SetReportAction,
+ AddReportAction,
+ EditReportAction,
+ DeleteReportAction,
+} from './actions';
+import { ReportObject } from 'src/features/reports/types';
+
+const makeReport = (overrides: Partial<ReportObject> = {}): ReportObject => ({
+ active: true,
+ crontab: '0 12 * * 1',
+ name: 'Test Report',
+ owners: [1],
+ recipients: [
+ {
+ recipient_config_json: { target: 'a@b.com', ccTarget: '', bccTarget: '' },
+ type: 'Email',
+ },
+ ],
+ report_format: 'PNG',
+ timezone: 'UTC',
+ type: 'Report',
+ validator_config_json: null,
+ validator_type: '',
+ working_timeout: 3600,
+ log_retention: 90,
+ creation_method: 'dashboards',
+ force_screenshot: false,
+ ...overrides,
+});
+
+test('SET_REPORT stores report keyed by resourceId under creationMethod', () => {
+ const report = makeReport({ id: 5, dashboard: 10 });
+ const action: SetReportAction = {
+ type: SET_REPORT,
+ report: { result: [report] },
+ resourceId: 10,
+ creationMethod: 'dashboards',
+ filterField: 'dashboard_id',
+ };
+
+ const result = reportsReducer({}, action);
+
+ expect(result.dashboards?.[10]).toEqual(report);
+});
+
+test('SET_REPORT removes entry when API returns empty result', () => {
+ const initial: ReportsState = {
+ dashboards: { 10: makeReport({ id: 5, dashboard: 10 }) },
+ };
+ const action: SetReportAction = {
+ type: SET_REPORT,
+ report: { result: [] },
+ resourceId: 10,
+ creationMethod: 'dashboards',
+ filterField: 'dashboard_id',
+ };
+
+ const result = reportsReducer(initial, action);
+
+ expect(result.dashboards?.[10]).toBeUndefined();
+});
+
+test('SET_REPORT uses chart property when filterField is chart_id', () => {
+ const report = makeReport({
+ id: 7,
+ chart: 42,
+ creation_method: 'charts',
+ });
+ const action: SetReportAction = {
+ type: SET_REPORT,
+ report: { result: [report] },
+ resourceId: 42,
+ creationMethod: 'charts',
+ filterField: 'chart_id',
+ };
+
+ const result = reportsReducer({}, action);
+
+ expect(result.charts?.[42]).toEqual(report);
+});
+
+test('ADD_REPORT keys dashboard report by dashboard id', () => {
+ const action: AddReportAction = {
+ type: ADD_REPORT,
+ json: {
+ id: 1,
+ result: { dashboard: 10, creation_method: 'dashboards' },
+ },
+ };
+
+ const result = reportsReducer({}, action);
+
+ expect(result.dashboards?.[10]).toMatchObject({ id: 1, dashboard: 10 });
+});
+
+test('ADD_REPORT keys alerts_reports report by report id', () => {
+ const action: AddReportAction = {
+ type: ADD_REPORT,
+ json: {
+ id: 99,
+ result: { creation_method: 'alerts_reports' },
+ },
+ };
+
+ const result = reportsReducer({}, action);
+
+ expect(result.alerts_reports?.[99]).toMatchObject({ id: 99 });
+});
+
+test('ADD_REPORT returns unchanged state when key is undefined', () => {
+ const initial: ReportsState = { dashboards: {} };
+ const action: AddReportAction = {
+ type: ADD_REPORT,
+ json: {
+ id: 1,
+ result: { creation_method: 'dashboards' },
+ // no dashboard or chart field → key is undefined
+ },
+ };
+
+ const result = reportsReducer(initial, action);
+
+ expect(result).toBe(initial);
+});
+
+test('EDIT_REPORT replaces existing report at same key', () => {
+ const initial: ReportsState = {
+ dashboards: {
+ 10: makeReport({ id: 1, dashboard: 10, name: 'Old Name' }),
+ },
+ };
+ const action: EditReportAction = {
+ type: EDIT_REPORT,
+ json: {
+ id: 1,
+ result: {
+ dashboard: 10,
+ creation_method: 'dashboards',
+ name: 'New Name',
+ },
+ },
+ };
+
+ const result = reportsReducer(initial, action);
+
+ expect(result.dashboards?.[10]?.name).toBe('New Name');
+});
+
+test('DELETE_REPORT removes report from state', () => {
+ const report = makeReport({
+ id: 5,
+ dashboard: 10,
+ creation_method: 'dashboards',
+ });
+ const initial: ReportsState = { dashboards: { 10: report } };
+ const action: DeleteReportAction = {
+ type: DELETE_REPORT,
+ report: {
+ id: 5,
+ dashboard: 10,
+ creation_method: 'dashboards',
+ },
+ };
+
+ const result = reportsReducer(initial, action);
+
+ expect(result.dashboards?.[10]).toBeUndefined();
+});
+
+test('DELETE_REPORT for alerts_reports keys by report id', () => {
+ const report = makeReport({ id: 99, creation_method: 'alerts_reports' });
+ const initial: ReportsState = { alerts_reports: { 99: report } };
+ const action: DeleteReportAction = {
+ type: DELETE_REPORT,
+ report: { id: 99, creation_method: 'alerts_reports' },
+ };
+
+ const result = reportsReducer(initial, action);
+
+ expect(result.alerts_reports?.[99]).toBeUndefined();
+});
+
+test('unknown action type returns state unchanged', () => {
+ const initial: ReportsState = { dashboards: { 1: makeReport({ id: 1 }) } };
+ const action = { type: 'UNKNOWN_ACTION' } as any;
+
+ const result = reportsReducer(initial, action);
+
+ expect(result).toBe(initial);
+});
diff --git a/superset-frontend/src/pages/AlertReportList/AlertReportList.test.tsx b/superset-frontend/src/pages/AlertReportList/AlertReportList.test.tsx
index 588478791ad..8ac726eb4d0 100644
--- a/superset-frontend/src/pages/AlertReportList/AlertReportList.test.tsx
+++ b/superset-frontend/src/pages/AlertReportList/AlertReportList.test.tsx
@@ -16,271 +16,469 @@
* specific language governing permissions and limitations
* under the License.
*/
+import type React from 'react';
import fetchMock from 'fetch-mock';
-import configureStore from 'redux-mock-store';
-import thunk from 'redux-thunk';
import {
render,
screen,
fireEvent,
waitFor,
+ createStore,
} from 'spec/helpers/testing-library';
+import { Provider } from 'react-redux';
import { MemoryRouter } from 'react-router-dom';
import { QueryParamProvider } from 'use-query-params';
import { ReactRouter5Adapter } from 'use-query-params/adapters/react-router-5';
-import React from 'react';
import AlertListComponent from 'src/pages/AlertReportList';
-// Cast to accept partial mock props in tests
+jest.setTimeout(30000);
+
const AlertList = AlertListComponent as unknown as React.FC<
   Record<string, unknown>
>;
-const mockStore = configureStore([thunk]);
-const store = mockStore({});
+// -- Mock data (IDs start at 1 to avoid the `if (data?.id)` falsy guard) --
-const alertsEndpoint = 'glob:*/api/v1/report/?*';
-const alertEndpoint = 'glob:*/api/v1/report/*';
-const alertsInfoEndpoint = 'glob:*/api/v1/report/_info*';
-const alertsCreatedByEndpoint = 'glob:*/api/v1/report/related/created_by*';
+const mockAlerts = [
+ {
+ id: 1,
+ name: 'Weekly Sales Alert',
+ active: true,
+ last_state: 'Success',
+ type: 'Alert',
+ owners: [{ id: 1, first_name: 'Admin', last_name: 'User' }],
+ recipients: [{ id: 1, type: 'Email' }],
+ changed_by: { id: 1, first_name: 'Admin', last_name: 'User' },
+ changed_on_delta_humanized: '1 day ago',
+ created_by: { id: 1, first_name: 'Admin', last_name: 'User' },
+ created_on: new Date().toISOString(),
+ last_eval_dttm: Date.now(),
+ crontab: '0 9 * * 1',
+ crontab_humanized: 'Every Monday at 09:00',
+ timezone: 'UTC',
+ },
+ {
+ id: 2,
+ name: 'Daily Error Alert',
+ active: true,
+ last_state: 'Error',
+ type: 'Alert',
+ owners: [{ id: 2, first_name: 'Data', last_name: 'Analyst' }],
+ recipients: [{ id: 2, type: 'Slack' }],
+ changed_by: { id: 2, first_name: 'Data', last_name: 'Analyst' },
+ changed_on_delta_humanized: '2 days ago',
+ created_by: { id: 2, first_name: 'Data', last_name: 'Analyst' },
+ created_on: new Date().toISOString(),
+ last_eval_dttm: Date.now(),
+ crontab: '0 8 * * *',
+ crontab_humanized: 'Every day at 08:00',
+ timezone: 'US/Pacific',
+ },
+ {
+ id: 3,
+ name: 'Monthly Revenue Alert',
+ active: false,
+ last_state: 'Working',
+ type: 'Alert',
+ owners: [{ id: 1, first_name: 'Admin', last_name: 'User' }],
+ recipients: [{ id: 3, type: 'Email' }],
+ changed_by: { id: 1, first_name: 'Admin', last_name: 'User' },
+ changed_on_delta_humanized: '5 days ago',
+ created_by: { id: 1, first_name: 'Admin', last_name: 'User' },
+ created_on: new Date().toISOString(),
+ last_eval_dttm: Date.now(),
+ crontab: '0 0 1 * *',
+ crontab_humanized: 'First day of the month',
+ timezone: 'UTC',
+ },
+];
-const mockalerts = Array.from({ length: 3 }, (_, i) => ({
- active: true,
- changed_by: {
- first_name: `user ${i}`,
- id: i,
+const mockReports = [
+ {
+ id: 10,
+ name: 'Weekly Dashboard Report',
+ active: true,
+ last_state: 'Success',
+ type: 'Report',
+ owners: [{ id: 1, first_name: 'Admin', last_name: 'User' }],
+ recipients: [{ id: 10, type: 'Email' }],
+ changed_by: { id: 1, first_name: 'Admin', last_name: 'User' },
+ changed_on_delta_humanized: '1 day ago',
+ created_by: { id: 1, first_name: 'Admin', last_name: 'User' },
+ created_on: new Date().toISOString(),
+ last_eval_dttm: Date.now(),
+ crontab: '0 9 * * 1',
+ crontab_humanized: 'Every Monday at 09:00',
+ timezone: 'UTC',
},
- changed_on_delta_humanized: `${i} day(s) ago`,
- created_by: {
- first_name: `user ${i}`,
- id: i,
+ {
+ id: 11,
+ name: 'Monthly KPI Report',
+ active: false,
+ last_state: 'Not triggered',
+ type: 'Report',
+ owners: [{ id: 1, first_name: 'Admin', last_name: 'User' }],
+ recipients: [{ id: 11, type: 'Slack' }],
+ changed_by: { id: 1, first_name: 'Admin', last_name: 'User' },
+ changed_on_delta_humanized: '3 days ago',
+ created_by: { id: 1, first_name: 'Admin', last_name: 'User' },
+ created_on: new Date().toISOString(),
+ last_eval_dttm: Date.now(),
+ crontab: '0 0 1 * *',
+ crontab_humanized: 'First day of the month',
+ timezone: 'UTC',
},
- created_on: new Date().toISOString,
- id: i,
- last_eval_dttm: Date.now(),
- last_state: 'ok',
- name: `alert ${i} `,
- owners: [{ id: 1 }],
- recipients: [
- {
- id: `${i}`,
- type: 'email',
- },
- ],
- type: 'alert',
-}));
+];
const mockUser = {
userId: 1,
- firstName: 'user 1',
- lastName: 'lastname',
+ firstName: 'Admin',
+ lastName: 'User',
};
-fetchMock.get(alertsEndpoint, {
- ids: [2, 0, 1],
- result: mockalerts,
- count: 3,
-});
-fetchMock.get(alertsInfoEndpoint, {
- permissions: ['can_write'],
-});
-fetchMock.get(alertsCreatedByEndpoint, { result: [] });
-fetchMock.put(alertEndpoint, { ...mockalerts[0], active: false });
-fetchMock.put(alertsEndpoint, { ...mockalerts[0], active: false });
-fetchMock.delete(alertEndpoint, {});
-fetchMock.delete(alertsEndpoint, {});
+// -- API endpoints (named for cleanup) --
-const renderAlertList = (props = {}) =>
- render(
-    <MemoryRouter>
-      <QueryParamProvider adapter={ReactRouter5Adapter}>
-        <AlertList {...props} user={mockUser} />
-      </QueryParamProvider>
-    </MemoryRouter>,
- {
- useRedux: true,
- store,
+const ENDPOINTS = {
+ LIST: 'glob:*/api/v1/report/?*',
+ INFO: 'glob:*/api/v1/report/_info*',
+ SINGLE: 'glob:*/api/v1/report/*',
+ CREATED_BY: 'glob:*/api/v1/report/related/created_by*',
+ OWNERS: 'glob:*/api/v1/report/related/owners*',
+ CHANGED_BY: 'glob:*/api/v1/report/related/changed_by*',
+};
+
+// -- Render helper --
+
+const renderAlertList = (props: Record<string, unknown> = {}) => {
+ const store = createStore();
+ return render(
+    <Provider store={store}>
+      <MemoryRouter>
+        <QueryParamProvider adapter={ReactRouter5Adapter}>
+          <AlertList user={mockUser} {...props} />
+        </QueryParamProvider>
+      </MemoryRouter>
+    </Provider>,
+ );
+};
+
+// -- Dynamic list endpoint: returns alerts or reports based on URL filter --
+
+const setupMocks = (
+ permissions: string[] = ['can_read', 'can_write'],
+ listData?: typeof mockAlerts,
+) => {
+ fetchMock.get(ENDPOINTS.INFO, { permissions }, { name: 'info' });
+
+ fetchMock.get(
+ ENDPOINTS.LIST,
+ ({ url }: any) => {
+ if (listData) {
+ return {
+ result: listData,
+ count: listData.length,
+ ids: listData.map(a => a.id),
+ };
+ }
+ const data = url.includes('value:Report') ? mockReports : mockAlerts;
+ return { result: data, count: data.length, ids: data.map(a => a.id) };
},
+ { name: 'list' },
);
-// eslint-disable-next-line no-restricted-globals -- TODO: Migrate from describe blocks
-describe('AlertList', () => {
- beforeEach(() => {
- fetchMock.clearHistory();
- });
+ fetchMock.get(ENDPOINTS.CREATED_BY, { result: [] }, { name: 'created-by' });
- test('renders', async () => {
- renderAlertList();
- expect(await screen.findByText('Alerts & reports')).toBeInTheDocument();
- });
+ fetchMock.get(ENDPOINTS.OWNERS, { result: [], count: 0 }, { name: 'owners' });
- test('renders a SubMenu', async () => {
- renderAlertList();
- expect(await screen.findByRole('navigation')).toBeInTheDocument();
- });
+ fetchMock.get(
+ ENDPOINTS.CHANGED_BY,
+ { result: [], count: 0 },
+ { name: 'changed-by' },
+ );
- test('renders a ListView', async () => {
- renderAlertList();
- expect(await screen.findByTestId('alerts-list-view')).toBeInTheDocument();
- });
+ fetchMock.put(
+ ENDPOINTS.SINGLE,
+ { result: { ...mockAlerts[0], active: false } },
+ { name: 'put-alert' },
+ );
- test('renders switches', async () => {
- renderAlertList();
- // Wait for the list to load first
- await screen.findByTestId('alerts-list-view');
- const switches = await screen.findAllByRole('switch');
- expect(switches).toHaveLength(3);
- });
+ fetchMock.delete(ENDPOINTS.SINGLE, {}, { name: 'delete-alert' });
- test('deletes', async () => {
- renderAlertList();
+ fetchMock.delete(
+ ENDPOINTS.LIST,
+ { message: 'Deleted' },
+ { name: 'delete-bulk' },
+ );
+};
- // Wait for list to load
- await screen.findByTestId('alerts-list-view');
+// -- Setup / teardown --
- // Find and click first delete button
- const deleteButtons = await screen.findAllByTestId('delete-action');
- fireEvent.click(deleteButtons[0]);
-
- // Wait for modal to appear and find the delete input
- const deleteInput = await screen.findByTestId('delete-modal-input');
- fireEvent.change(deleteInput, { target: { value: 'DELETE' } });
-
- // Click confirm button
- const confirmButton = await screen.findByTestId('modal-confirm-button');
- fireEvent.click(confirmButton);
-
- // Wait for delete request
- await waitFor(() => {
- expect(
- fetchMock.callHistory.calls(/report\/0/, { method: 'DELETE' }),
- ).toHaveLength(1);
- });
- }, 15000);
-
- test('shows/hides bulk actions when bulk actions is clicked', async () => {
- renderAlertList();
-
- // Wait for list to load and initial state
- await screen.findByTestId('alerts-list-view');
- expect(
- screen.queryByTestId('bulk-select-controls'),
- ).not.toBeInTheDocument();
-
- // Click bulk select toggle
- const bulkSelectButton = await screen.findByTestId('bulk-select-toggle');
- fireEvent.click(bulkSelectButton);
-
- // Verify bulk select controls appear
- expect(
- await screen.findByTestId('bulk-select-controls'),
- ).toBeInTheDocument();
- }, 15000);
-
- test('hides bulk actions when switch between alert and report list', async () => {
- // Start with alert list
- renderAlertList();
-
- // Wait for list to load
- await screen.findByTestId('alerts-list-view');
-
- // Click bulk select to show controls
- const bulkSelectButton = await screen.findByTestId('bulk-select-toggle');
- fireEvent.click(bulkSelectButton);
-
- // Verify bulk select controls appear
- expect(
- await screen.findByTestId('bulk-select-controls'),
- ).toBeInTheDocument();
-
- // Verify alert tab is active
- const alertTab = await screen.findByTestId('alert-list');
- expect(alertTab).toHaveClass('active');
- const reportTab = screen.getByTestId('report-list');
- expect(reportTab).not.toHaveClass('active');
-
- // Switch to report list
- renderAlertList({ isReportEnabled: true });
-
- // Wait for report list API call and tab states to update
- await waitFor(async () => {
- // Check API call
- const calls = fetchMock.callHistory.calls(/report\/\?q/);
- const hasReportCall = calls.some(call =>
- call.url.includes('filters:!((col:type,opr:eq,value:Report))'),
- );
-
- // Check tab states
- const reportTabs = screen.getAllByTestId('report-list');
- const alertTabs = screen.getAllByTestId('alert-list');
- const hasActiveReport = reportTabs.some(tab =>
- tab.classList.contains('active'),
- );
- const hasNoActiveAlert = alertTabs.every(
- tab => !tab.classList.contains('active'),
- );
-
- return hasReportCall && hasActiveReport && hasNoActiveAlert;
- });
-
- // Click bulk select toggle again to hide controls
- const bulkSelectButtons =
- await screen.findAllByTestId('bulk-select-toggle');
- fireEvent.click(bulkSelectButtons[0]);
-
- // Verify final state
- await waitFor(() => {
- expect(
- screen.queryByTestId('bulk-select-controls'),
- ).not.toBeInTheDocument();
- });
-
- // Verify correct API call was made
- const reportCalls = fetchMock.callHistory.calls(/report\/\?q/);
- const lastReportCall = reportCalls[reportCalls.length - 1].url;
- expect(lastReportCall).toContain(
- 'filters:!((col:type,opr:eq,value:Report))',
- );
- }, 15000);
-
- test('renders listview table correctly', async () => {
- renderAlertList();
- await screen.findByTestId('alerts-list-view');
-
- const table = await screen.findByTestId('listview-table');
- expect(table).toBeInTheDocument();
- expect(table).toBeVisible();
- }, 15000);
-
- test('renders correct column headers for alerts', async () => {
- renderAlertList();
- await screen.findByTestId('alerts-list-view');
-
- expect(screen.getByTitle('Last run')).toBeInTheDocument();
- expect(
- screen.getByRole('columnheader', { name: /name/i }),
- ).toBeInTheDocument();
- expect(screen.getByTitle('Schedule')).toBeInTheDocument();
- expect(screen.getByTitle('Notification method')).toBeInTheDocument();
- expect(screen.getByTitle('Owners')).toBeInTheDocument();
- expect(screen.getByTitle('Last modified')).toBeInTheDocument();
- expect(screen.getByTitle('Active')).toBeInTheDocument();
- expect(screen.getByTitle('Actions')).toBeInTheDocument();
- }, 15000);
-
- test('renders correct column headers for reports', async () => {
- renderAlertList({ isReportEnabled: true });
- await screen.findByTestId('alerts-list-view');
-
- expect(screen.getByTitle('Last run')).toBeInTheDocument();
- expect(
- screen.getByRole('columnheader', { name: /name/i }),
- ).toBeInTheDocument();
- expect(screen.getByTitle('Schedule')).toBeInTheDocument();
- expect(screen.getByTitle('Notification method')).toBeInTheDocument();
- expect(screen.getByTitle('Owners')).toBeInTheDocument();
- expect(screen.getByTitle('Last modified')).toBeInTheDocument();
- expect(screen.getByTitle('Active')).toBeInTheDocument();
- expect(screen.getByTitle('Actions')).toBeInTheDocument();
- }, 15000);
+beforeEach(() => {
+ fetchMock.removeRoutes().clearHistory();
+ setupMocks();
+});
+
+afterEach(() => {
+ fetchMock.removeRoutes().clearHistory();
+});
+
+// -- Tests --
+
+test('loads rows from API and renders alert names, status, and actions', async () => {
+ renderAlertList();
+
+ // All 3 alert names appear
+ await screen.findByText('Weekly Sales Alert');
+ expect(screen.getByText('Daily Error Alert')).toBeInTheDocument();
+ expect(screen.getByText('Monthly Revenue Alert')).toBeInTheDocument();
+
+ // Active switches rendered for each row
+ const switches = screen.getAllByRole('switch');
+ expect(switches).toHaveLength(3);
+
+ // Delete actions present for owned alerts (userId=1 owns alerts 1 and 3)
+ const deleteButtons = screen.getAllByTestId('delete-action');
+ expect(deleteButtons.length).toBeGreaterThanOrEqual(2);
+
+ // Column headers
+ expect(screen.getByTitle('Last run')).toBeInTheDocument();
+ expect(
+ screen.getByRole('columnheader', { name: /name/i }),
+ ).toBeInTheDocument();
+ expect(screen.getByTitle('Schedule')).toBeInTheDocument();
+ expect(screen.getByTitle('Active')).toBeInTheDocument();
+ expect(screen.getByTitle('Actions')).toBeInTheDocument();
+
+ // API was called with Alert filter
+ const listCalls = fetchMock.callHistory.calls('list');
+ expect(listCalls.length).toBeGreaterThanOrEqual(1);
+ expect(listCalls[0].url).toContain('value:Alert');
+});
+
+test('toggle active sends PUT and updates switch state', async () => {
+ renderAlertList();
+ await screen.findByText('Weekly Sales Alert');
+
+ const switches = screen.getAllByRole('switch');
+ // First switch is for alert id=1 (owned by user, active: true)
+ expect(switches[0]).toBeChecked();
+
+ fireEvent.click(switches[0]);
+
+ // PUT called with active=false
+ await waitFor(() => {
+ const putCalls = fetchMock.callHistory.calls('put-alert');
+ expect(putCalls).toHaveLength(1);
+ });
+
+ const putCalls = fetchMock.callHistory.calls('put-alert');
+ const body = JSON.parse(putCalls[0].options.body as string);
+ expect(body.active).toBe(false);
+ expect(putCalls[0].url).toContain('/report/1');
+});
+
+test('toggle active rolls back on failed update', async () => {
+ renderAlertList();
+ await screen.findByText('Weekly Sales Alert');
+
+ // Replace PUT with 500 error
+ fetchMock.removeRoute('put-alert');
+ fetchMock.put(ENDPOINTS.SINGLE, 500, { name: 'put-fail' });
+
+ const switches = screen.getAllByRole('switch');
+ expect(switches[0]).toBeChecked();
+
+ fireEvent.click(switches[0]);
+
+ // PUT was attempted
+ await waitFor(() => {
+ expect(fetchMock.callHistory.calls('put-fail')).toHaveLength(1);
+ });
+
+ // Switch rolls back to checked (server rejected)
+ await waitFor(() => {
+ expect(switches[0]).toBeChecked();
+ });
+});
+
+test('switching to Reports refetches and renders only report rows', async () => {
+ // Render alerts mode first
+ const { unmount } = renderAlertList();
+ await screen.findByText('Weekly Sales Alert');
+ unmount();
+
+ // Render reports mode
+ fetchMock.clearHistory();
+ renderAlertList({ isReportEnabled: true });
+
+ await screen.findByText('Weekly Dashboard Report');
+ expect(screen.getByText('Monthly KPI Report')).toBeInTheDocument();
+
+ // Alert names should not appear
+ expect(screen.queryByText('Weekly Sales Alert')).not.toBeInTheDocument();
+ expect(screen.queryByText('Daily Error Alert')).not.toBeInTheDocument();
+
+ // API called with Report filter
+ const listCalls = fetchMock.callHistory.calls('list');
+ expect(listCalls.length).toBeGreaterThanOrEqual(1);
+ const reportCall = listCalls.find((c: any) => c.url.includes('value:Report'));
+ expect(reportCall).toBeDefined();
+});
+
+test('delete removes row after confirmation', async () => {
+ // Track deletions so the GET mock reflects them on refetch
+  const deletedIds = new Set<number>();
+ fetchMock.removeRoute('list');
+ fetchMock.get(
+ ENDPOINTS.LIST,
+ (_callLog: any) => {
+ const remaining = mockAlerts.filter(a => !deletedIds.has(a.id));
+ return {
+ result: remaining,
+ count: remaining.length,
+ ids: remaining.map(a => a.id),
+ };
+ },
+ { name: 'list' },
+ );
+
+ // Override DELETE to mark the alert as deleted before returning
+ fetchMock.removeRoute('delete-alert');
+ fetchMock.delete(
+ ENDPOINTS.SINGLE,
+ ({ url }: any) => {
+ const match = url.match(/\/report\/(\d+)/);
+ if (match) deletedIds.add(Number(match[1]));
+ return {};
+ },
+ { name: 'delete-alert' },
+ );
+
+ renderAlertList();
+ await screen.findByText('Weekly Sales Alert');
+
+ // Click delete on first owned alert
+ const deleteButtons = screen.getAllByTestId('delete-action');
+ fireEvent.click(deleteButtons[0]);
+
+ // Confirm in delete modal
+ const deleteInput = await screen.findByTestId('delete-modal-input');
+ fireEvent.change(deleteInput, { target: { value: 'DELETE' } });
+ const confirmButton = await screen.findByTestId('modal-confirm-button');
+ fireEvent.click(confirmButton);
+
+ // Row disappears after refetch
+ await waitFor(() => {
+ expect(screen.queryByText('Weekly Sales Alert')).not.toBeInTheDocument();
+ });
+
+ // Other alerts remain
+ expect(screen.getByText('Daily Error Alert')).toBeInTheDocument();
+});
+
+test('delete failure leaves row visible', async () => {
+ // Replace DELETE with 500
+ fetchMock.removeRoute('delete-alert');
+ fetchMock.delete(ENDPOINTS.SINGLE, 500, { name: 'delete-fail' });
+
+ renderAlertList();
+ await screen.findByText('Weekly Sales Alert');
+
+ const deleteButtons = screen.getAllByTestId('delete-action');
+ fireEvent.click(deleteButtons[0]);
+
+ const deleteInput = await screen.findByTestId('delete-modal-input');
+ fireEvent.change(deleteInput, { target: { value: 'DELETE' } });
+ const confirmButton = await screen.findByTestId('modal-confirm-button');
+ fireEvent.click(confirmButton);
+
+ await waitFor(() => {
+ expect(fetchMock.callHistory.calls('delete-fail')).toHaveLength(1);
+ });
+
+ // Row stays visible
+ expect(screen.getByText('Weekly Sales Alert')).toBeInTheDocument();
+});
+
+test('bulk select shows selected count and enables bulk actions after row selection', async () => {
+ renderAlertList();
+ await screen.findByText('Weekly Sales Alert');
+
+ // Bulk select controls not visible initially
+ expect(screen.queryByTestId('bulk-select-controls')).not.toBeInTheDocument();
+
+ // Toggle bulk select
+ const bulkSelectButton = screen.getByTestId('bulk-select-toggle');
+ fireEvent.click(bulkSelectButton);
+
+ // Controls appear with "0 Selected" text
+ await screen.findByTestId('bulk-select-controls');
+ expect(screen.getByTestId('bulk-select-copy')).toHaveTextContent(
+ '0 Selected',
+ );
+
+ // Deselect-all and action button not yet visible (nothing selected)
+ expect(
+ screen.queryByTestId('bulk-select-deselect-all'),
+ ).not.toBeInTheDocument();
+
+ // Select all rows via checkboxes that appear in bulk mode
+ const checkboxes = screen.getAllByRole('checkbox');
+ // First checkbox is the header "select all" toggle
+ fireEvent.click(checkboxes[0]);
+
+ // Bulk action button and deselect-all appear after selection
+ await waitFor(() => {
+ expect(screen.getByTestId('bulk-select-action')).toBeInTheDocument();
+ });
+ expect(screen.getByTestId('bulk-select-deselect-all')).toBeInTheDocument();
+ expect(screen.getByTestId('bulk-select-copy')).toHaveTextContent(
+ '3 Selected',
+ );
+});
+
+test('read-only users do not see delete and bulk select controls', async () => {
+ fetchMock.removeRoutes().clearHistory();
+ setupMocks(['can_read']); // no can_write
+
+ const readOnlyUser = {
+ userId: 99,
+ firstName: 'Read',
+ lastName: 'Only',
+ };
+
+ const store = createStore();
+
+ render(
+    <Provider store={store}>
+      <MemoryRouter>
+        <QueryParamProvider adapter={ReactRouter5Adapter}>
+          <AlertList user={readOnlyUser} />
+        </QueryParamProvider>
+      </MemoryRouter>
+    </Provider>,
+ );
+
+ await screen.findByText('Weekly Sales Alert');
+
+ // No delete action buttons
+ expect(screen.queryAllByTestId('delete-action')).toHaveLength(0);
+
+ // No bulk select toggle
+ expect(screen.queryByTestId('bulk-select-toggle')).not.toBeInTheDocument();
+
+ // Switches are all disabled (user 99 doesn't own any alerts)
+ const switches = screen.getAllByRole('switch');
+ switches.forEach(sw => {
+ expect(sw).toBeDisabled();
+ });
+});
+
+test('empty API result shows empty state', async () => {
+ fetchMock.removeRoutes().clearHistory();
+ setupMocks(['can_read', 'can_write'], []);
+
+ renderAlertList();
+
+ expect(await screen.findByText(/no alerts yet/i)).toBeInTheDocument();
});
diff --git a/superset-frontend/src/pages/AlertReportList/index.tsx b/superset-frontend/src/pages/AlertReportList/index.tsx
index c457b74617f..94ba9186706 100644
--- a/superset-frontend/src/pages/AlertReportList/index.tsx
+++ b/superset-frontend/src/pages/AlertReportList/index.tsx
@@ -242,9 +242,13 @@ function AlertList({
}),
);
- updateResource(update_id, { active: checked }, false, false)
- .then()
- .catch(() => setResourceCollection(original));
+ updateResource(update_id, { active: checked }, false, false).then(
+ response => {
+ if (!response) {
+ setResourceCollection(original);
+ }
+ },
+ );
}
},
[alerts, setResourceCollection, updateResource],
diff --git a/superset/commands/report/execute.py b/superset/commands/report/execute.py
index 72c84986273..5e31c575a2d 100644
--- a/superset/commands/report/execute.py
+++ b/superset/commands/report/execute.py
@@ -287,14 +287,19 @@ class BaseReportState:
except json.JSONDecodeError:
logger.debug("Anchor value is not a list, Fall back to single tab")
+ # Merge native_filters into existing urlParams instead of
+ # overwriting — dashboard_state may already have urlParams
+ # (e.g. standalone=true) that must be preserved.
+ state: DashboardPermalinkState = {**dashboard_state}
+ existing_params: list[tuple[str, str]] = state.get("urlParams") or []
+ merged_params: list[list[str]] = [
+ list(p) for p in existing_params if p[0] != "native_filters"
+ ]
+ merged_params.append(["native_filters", native_filter_params or ""])
+ state["urlParams"] = merged_params # type: ignore[typeddict-item]
return [
self._get_tab_url(
- {
- "urlParams": [
- ["native_filters", native_filter_params] # type: ignore
- ],
- **dashboard_state,
- },
+ state,
user_friendly=user_friendly,
)
]
diff --git a/superset/reports/models.py b/superset/reports/models.py
index 66f37eefddd..db70595063b 100644
--- a/superset/reports/models.py
+++ b/superset/reports/models.py
@@ -334,11 +334,11 @@ class ReportSchedule(AuditMixinNullable, ExtraJSONMixin, Model):
"filterState": {
"value": [min_val, max_val],
"label": f"{min_val} ≤ x ≤ {max_val}"
- if min_val and max_val
+ if min_val is not None and max_val is not None
else f"x ≥ {min_val}"
- if min_val
+ if min_val is not None
else f"x ≤ {max_val}"
- if max_val
+ if max_val is not None
else "",
},
"ownState": {},
diff --git a/tests/unit_tests/commands/report/create_test.py b/tests/unit_tests/commands/report/create_test.py
new file mode 100644
index 00000000000..aa3f92a71a7
--- /dev/null
+++ b/tests/unit_tests/commands/report/create_test.py
@@ -0,0 +1,222 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from typing import Any
+from unittest.mock import MagicMock
+
+import pytest
+from marshmallow import ValidationError
+from pytest_mock import MockerFixture
+
+from superset.commands.report.create import CreateReportScheduleCommand
+from superset.commands.report.exceptions import (
+ DatabaseNotFoundValidationError,
+ ReportScheduleAlertRequiredDatabaseValidationError,
+ ReportScheduleInvalidError,
+)
+from superset.reports.models import ReportScheduleType
+from superset.utils import json
+
+
+def _make_dashboard(position: dict[str, Any]) -> MagicMock:
+ dashboard = MagicMock()
+ dashboard.position_json = json.dumps(position)
+ return dashboard
+
+
+def test_validate_report_extra_null_extra() -> None:
+ command = CreateReportScheduleCommand({})
+ command._properties = {"extra": None}
+
+ exceptions: list[ValidationError] = []
+ command._validate_report_extra(exceptions)
+
+ assert len(exceptions) == 0
+
+
+def test_validate_report_extra_null_dashboard() -> None:
+ command = CreateReportScheduleCommand({})
+ command._properties = {"extra": {"dashboard": {}}, "dashboard": None}
+
+ exceptions: list[ValidationError] = []
+ command._validate_report_extra(exceptions)
+
+ assert len(exceptions) == 0
+
+
+def test_validate_report_extra_empty_active_tabs() -> None:
+ command = CreateReportScheduleCommand({})
+ command._properties = {
+ "extra": {"dashboard": {"activeTabs": []}},
+ "dashboard": _make_dashboard({"TAB-1": {}, "TAB-2": {}}),
+ }
+
+ exceptions: list[ValidationError] = []
+ command._validate_report_extra(exceptions)
+
+ assert len(exceptions) == 0
+
+
+def test_validate_report_extra_valid_tabs() -> None:
+ command = CreateReportScheduleCommand({})
+ command._properties = {
+ "extra": {"dashboard": {"activeTabs": ["TAB-1"]}},
+ "dashboard": _make_dashboard({"TAB-1": {}, "TAB-2": {}}),
+ }
+
+ exceptions: list[ValidationError] = []
+ command._validate_report_extra(exceptions)
+
+ assert len(exceptions) == 0
+
+
+def test_validate_report_extra_invalid_tabs() -> None:
+ command = CreateReportScheduleCommand({})
+ command._properties = {
+ "extra": {"dashboard": {"activeTabs": ["TAB-999"]}},
+ "dashboard": _make_dashboard({"TAB-1": {}}),
+ }
+
+ exceptions: list[ValidationError] = []
+ command._validate_report_extra(exceptions)
+
+ assert len(exceptions) == 1
+ assert exceptions[0].field_name == "extra"
+
+
+def test_validate_report_extra_anchor_json_valid() -> None:
+ command = CreateReportScheduleCommand({})
+ command._properties = {
+ "extra": {"dashboard": {"anchor": '["TAB-1"]'}},
+ "dashboard": _make_dashboard({"TAB-1": {}}),
+ }
+
+ exceptions: list[ValidationError] = []
+ command._validate_report_extra(exceptions)
+
+ assert len(exceptions) == 0
+
+
+def test_validate_report_extra_anchor_invalid_ids() -> None:
+ command = CreateReportScheduleCommand({})
+ command._properties = {
+ "extra": {"dashboard": {"anchor": '["TAB-999"]'}},
+ "dashboard": _make_dashboard({"TAB-1": {}}),
+ }
+
+ exceptions: list[ValidationError] = []
+ command._validate_report_extra(exceptions)
+
+ assert len(exceptions) == 1
+ assert exceptions[0].field_name == "extra"
+
+
+def test_validate_report_extra_anchor_string_valid() -> None:
+ command = CreateReportScheduleCommand({})
+ command._properties = {
+ "extra": {"dashboard": {"anchor": "TAB-1"}},
+ "dashboard": _make_dashboard({"TAB-1": {}}),
+ }
+
+ exceptions: list[ValidationError] = []
+ command._validate_report_extra(exceptions)
+
+ assert len(exceptions) == 0
+
+
+def test_validate_report_extra_anchor_string_invalid() -> None:
+ command = CreateReportScheduleCommand({})
+ command._properties = {
+ "extra": {"dashboard": {"anchor": "TAB-999"}},
+ "dashboard": _make_dashboard({"TAB-1": {}}),
+ }
+
+ exceptions: list[ValidationError] = []
+ command._validate_report_extra(exceptions)
+
+ assert len(exceptions) == 1
+ assert exceptions[0].field_name == "extra"
+
+
+# ---------------------------------------------------------------------------
+# Phase 1 gap closure: validate() — alert + database combos
+# ---------------------------------------------------------------------------
+
+
+def _stub_validate_deps(mocker: MockerFixture) -> None:
+ """Stub out all DAO and base-class calls inside validate() so the test
+ can exercise a single validation branch in isolation."""
+ mocker.patch.object(CreateReportScheduleCommand, "_populate_recipients")
+ mocker.patch(
+ "superset.commands.report.create.ReportScheduleDAO.validate_update_uniqueness",
+ return_value=True,
+ )
+ mocker.patch.object(CreateReportScheduleCommand, "validate_report_frequency")
+ mocker.patch.object(CreateReportScheduleCommand, "validate_chart_dashboard")
+ mocker.patch.object(CreateReportScheduleCommand, "_validate_report_extra")
+ mocker.patch(
+ "superset.commands.report.create.ReportScheduleDAO"
+ ".validate_unique_creation_method",
+ return_value=True,
+ )
+ mocker.patch.object(CreateReportScheduleCommand, "populate_owners", return_value=[])
+
+
+def test_validate_alert_missing_database_key(mocker: MockerFixture) -> None:
+ """Alert type without a 'database' key raises the required-database error."""
+ _stub_validate_deps(mocker)
+
+ command = CreateReportScheduleCommand({})
+ command._properties = {
+ "type": ReportScheduleType.ALERT,
+ "name": "Test Alert",
+ "crontab": "* * * * *",
+ "creation_method": "alerts_reports",
+ }
+
+ with pytest.raises(ReportScheduleInvalidError) as exc:
+ command.validate()
+
+ assert any(
+ isinstance(e, ReportScheduleAlertRequiredDatabaseValidationError)
+ for e in exc.value._exceptions
+ )
+
+
+def test_validate_alert_nonexistent_database(mocker: MockerFixture) -> None:
+ """Alert type with a database ID that doesn't exist raises not-found."""
+ _stub_validate_deps(mocker)
+ mocker.patch(
+ "superset.commands.report.create.DatabaseDAO.find_by_id",
+ return_value=None,
+ )
+
+ command = CreateReportScheduleCommand({})
+ command._properties = {
+ "type": ReportScheduleType.ALERT,
+ "name": "Test Alert",
+ "crontab": "* * * * *",
+ "creation_method": "alerts_reports",
+ "database": 999,
+ }
+
+ with pytest.raises(ReportScheduleInvalidError) as exc:
+ command.validate()
+
+ assert any(
+ isinstance(e, DatabaseNotFoundValidationError) for e in exc.value._exceptions
+ )
diff --git a/tests/unit_tests/commands/report/execute_test.py b/tests/unit_tests/commands/report/execute_test.py
index f1f73bcf4bc..8b99cf71c11 100644
--- a/tests/unit_tests/commands/report/execute_test.py
+++ b/tests/unit_tests/commands/report/execute_test.py
@@ -16,23 +16,42 @@
# under the License.
import json # noqa: TID251
-from datetime import datetime
+from datetime import datetime, timedelta
from unittest.mock import patch
-from uuid import UUID
+from uuid import UUID, uuid4
import pytest
from pytest_mock import MockerFixture
from superset.app import SupersetApp
from superset.commands.exceptions import UpdateFailedError
-from superset.commands.report.execute import BaseReportState
+from superset.commands.report.exceptions import (
+ ReportScheduleAlertGracePeriodError,
+ ReportScheduleCsvFailedError,
+ ReportSchedulePreviousWorkingError,
+ ReportScheduleScreenshotFailedError,
+ ReportScheduleScreenshotTimeout,
+ ReportScheduleStateNotFoundError,
+ ReportScheduleUnexpectedError,
+ ReportScheduleWorkingTimeoutError,
+)
+from superset.commands.report.execute import (
+ BaseReportState,
+ ReportNotTriggeredErrorState,
+ ReportScheduleStateMachine,
+ ReportSuccessState,
+ ReportWorkingState,
+)
+from superset.daos.report import REPORT_SCHEDULE_ERROR_NOTIFICATION_MARKER
from superset.dashboards.permalink.types import DashboardPermalinkState
from superset.reports.models import (
+ ReportDataFormat,
ReportRecipients,
ReportRecipientType,
ReportSchedule,
ReportScheduleType,
ReportSourceFormat,
+ ReportState,
)
from superset.utils.core import HeaderDataType
from superset.utils.screenshots import ChartScreenshot
@@ -357,6 +376,227 @@ def test_get_dashboard_urls_with_exporting_dashboard_only(
assert expected_url == result[0]
+@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
+@with_feature_flags(ALERT_REPORT_TABS=True)
+def test_get_dashboard_urls_with_filters_and_tabs(
+ mock_permalink_cls,
+ mocker: MockerFixture,
+ app,
+) -> None:
+ mock_report_schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
+ mock_report_schedule.chart = False
+ mock_report_schedule.chart_id = None
+ mock_report_schedule.dashboard_id = 123
+ mock_report_schedule.type = "report_type"
+ mock_report_schedule.report_format = "report_format"
+ mock_report_schedule.owners = [1, 2]
+ mock_report_schedule.recipients = []
+ native_filter_rison = "(NATIVE_FILTER-1:(filterType:filter_select))"
+ mock_report_schedule.extra = {
+ "dashboard": {
+ "anchor": json.dumps(["TAB-1", "TAB-2"]),
+ "dataMask": {"NATIVE_FILTER-1": {"filterState": {"value": ["Sales"]}}},
+ "activeTabs": ["TAB-1", "TAB-2"],
+ "urlParams": None,
+ "nativeFilters": [ # type: ignore[typeddict-unknown-key]
+ {
+ "nativeFilterId": "NATIVE_FILTER-1",
+ "filterType": "filter_select",
+ "columnName": "department",
+ "filterValues": ["Sales"],
+ }
+ ],
+ }
+ }
+ mock_report_schedule.get_native_filters_params.return_value = ( # type: ignore[attr-defined]
+ native_filter_rison,
+ [],
+ )
+ mock_permalink_cls.return_value.run.side_effect = ["key1", "key2"]
+
+ class_instance: BaseReportState = BaseReportState(
+ mock_report_schedule, "January 1, 2021", "execution_id_example"
+ )
+ class_instance._report_schedule = mock_report_schedule
+
+ result: list[str] = class_instance.get_dashboard_urls()
+
+ import urllib.parse
+
+ base_url = app.config.get("WEBDRIVER_BASEURL", "http://0.0.0.0:8080/")
+ assert result == [
+ urllib.parse.urljoin(base_url, "superset/dashboard/p/key1/"),
+ urllib.parse.urljoin(base_url, "superset/dashboard/p/key2/"),
+ ]
+ mock_report_schedule.get_native_filters_params.assert_called_once() # type: ignore[attr-defined]
+ assert mock_permalink_cls.call_count == 2
+ for call in mock_permalink_cls.call_args_list:
+ state = call.kwargs["state"]
+ assert state["urlParams"] == [["native_filters", native_filter_rison]]
+ assert mock_permalink_cls.call_args_list[0].kwargs["state"]["anchor"] == "TAB-1"
+ assert mock_permalink_cls.call_args_list[1].kwargs["state"]["anchor"] == "TAB-2"
+
+
+@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
+@with_feature_flags(ALERT_REPORT_TABS=True)
+def test_get_dashboard_urls_with_filters_no_tabs(
+ mock_permalink_cls,
+ mocker: MockerFixture,
+ app,
+) -> None:
+ mock_report_schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
+ mock_report_schedule.chart = False
+ mock_report_schedule.chart_id = None
+ mock_report_schedule.dashboard_id = 123
+ mock_report_schedule.type = "report_type"
+ mock_report_schedule.report_format = "report_format"
+ mock_report_schedule.owners = [1, 2]
+ mock_report_schedule.recipients = []
+ native_filter_rison = "(NATIVE_FILTER-1:(filterType:filter_select))"
+ mock_report_schedule.extra = {
+ "dashboard": {
+ "anchor": "",
+ "dataMask": {"NATIVE_FILTER-1": {"filterState": {"value": ["Sales"]}}},
+ "activeTabs": None,
+ "urlParams": None,
+ "nativeFilters": [ # type: ignore[typeddict-unknown-key]
+ {
+ "nativeFilterId": "NATIVE_FILTER-1",
+ "filterType": "filter_select",
+ "columnName": "department",
+ "filterValues": ["Sales"],
+ }
+ ],
+ }
+ }
+ mock_report_schedule.get_native_filters_params.return_value = ( # type: ignore[attr-defined]
+ native_filter_rison,
+ [],
+ )
+ mock_permalink_cls.return_value.run.return_value = "key1"
+
+ class_instance: BaseReportState = BaseReportState(
+ mock_report_schedule, "January 1, 2021", "execution_id_example"
+ )
+ class_instance._report_schedule = mock_report_schedule
+
+ result: list[str] = class_instance.get_dashboard_urls()
+
+ import urllib.parse
+
+ base_url = app.config.get("WEBDRIVER_BASEURL", "http://0.0.0.0:8080/")
+ assert result == [
+ urllib.parse.urljoin(base_url, "superset/dashboard/p/key1/"),
+ ]
+ mock_report_schedule.get_native_filters_params.assert_called_once() # type: ignore[attr-defined]
+ assert mock_permalink_cls.call_count == 1
+ state = mock_permalink_cls.call_args_list[0].kwargs["state"]
+ assert state["urlParams"] == [["native_filters", native_filter_rison]]
+
+
+@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
+@with_feature_flags(ALERT_REPORT_TABS=True)
+def test_get_dashboard_urls_preserves_existing_url_params(
+ mock_permalink_cls,
+ mocker: MockerFixture,
+ app,
+) -> None:
+ """Existing urlParams (e.g. standalone) must survive native_filters merge."""
+ mock_report_schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
+ mock_report_schedule.chart = False
+ mock_report_schedule.chart_id = None
+ mock_report_schedule.dashboard_id = 123
+ mock_report_schedule.type = "report_type"
+ mock_report_schedule.report_format = "report_format"
+ mock_report_schedule.owners = [1, 2]
+ mock_report_schedule.recipients = []
+ native_filter_rison = "(NATIVE_FILTER-1:(filterType:filter_select))"
+ mock_report_schedule.extra = {
+ "dashboard": {
+ "anchor": "",
+ "dataMask": {},
+ "activeTabs": None,
+ "urlParams": [("standalone", "true"), ("show_filters", "0")],
+ "nativeFilters": [ # type: ignore[typeddict-unknown-key]
+ {
+ "nativeFilterId": "NATIVE_FILTER-1",
+ "filterType": "filter_select",
+ "columnName": "dept",
+ "filterValues": ["Sales"],
+ }
+ ],
+ }
+ }
+ mock_report_schedule.get_native_filters_params.return_value = ( # type: ignore[attr-defined]
+ native_filter_rison,
+ [],
+ )
+ mock_permalink_cls.return_value.run.return_value = "key1"
+
+ class_instance: BaseReportState = BaseReportState(
+ mock_report_schedule, "January 1, 2021", "execution_id_example"
+ )
+ class_instance._report_schedule = mock_report_schedule
+
+ class_instance.get_dashboard_urls()
+
+ state = mock_permalink_cls.call_args_list[0].kwargs["state"]
+ assert state["urlParams"] == [
+ ["standalone", "true"],
+ ["show_filters", "0"],
+ ["native_filters", native_filter_rison],
+ ]
+
+
+@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
+@with_feature_flags(ALERT_REPORT_TABS=True)
+def test_get_dashboard_urls_deduplicates_stale_native_filters(
+ mock_permalink_cls,
+ mocker: MockerFixture,
+ app,
+) -> None:
+ """A stale native_filters entry in urlParams is replaced, not duplicated."""
+ mock_report_schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
+ mock_report_schedule.chart = False
+ mock_report_schedule.chart_id = None
+ mock_report_schedule.dashboard_id = 123
+ mock_report_schedule.type = "report_type"
+ mock_report_schedule.report_format = "report_format"
+ mock_report_schedule.owners = [1, 2]
+ mock_report_schedule.recipients = []
+ native_filter_rison = "(NATIVE_FILTER-1:(new:value))"
+ mock_report_schedule.extra = {
+ "dashboard": {
+ "anchor": "",
+ "dataMask": {},
+ "activeTabs": None,
+ "urlParams": [
+ ("standalone", "true"),
+ ("native_filters", "(old:stale_value)"),
+ ],
+ "nativeFilters": [], # type: ignore[typeddict-unknown-key]
+ }
+ }
+ mock_report_schedule.get_native_filters_params.return_value = ( # type: ignore[attr-defined]
+ native_filter_rison,
+ [],
+ )
+ mock_permalink_cls.return_value.run.return_value = "key1"
+
+ class_instance: BaseReportState = BaseReportState(
+ mock_report_schedule, "January 1, 2021", "execution_id_example"
+ )
+ class_instance._report_schedule = mock_report_schedule
+
+ class_instance.get_dashboard_urls()
+
+ state = mock_permalink_cls.call_args_list[0].kwargs["state"]
+ assert state["urlParams"] == [
+ ["standalone", "true"],
+ ["native_filters", native_filter_rison],
+ ]
+
+
@patch(
"superset.commands.dashboard.permalink.create.CreateDashboardPermalinkCommand.run"
)
@@ -596,3 +836,484 @@ def test_update_recipient_to_slack_v2_missing_channels(mocker: MockerFixture):
)
with pytest.raises(UpdateFailedError):
mock_cmmd.update_report_schedule_slack_v2()
+
+
+# ---------------------------------------------------------------------------
+# Tier 1: _update_query_context + create_log
+# ---------------------------------------------------------------------------
+
+
+def test_update_query_context_wraps_screenshot_failure(mocker: MockerFixture) -> None:
+ """_update_query_context wraps ScreenshotFailedError as CsvFailedError."""
+ schedule = mocker.Mock(spec=ReportSchedule)
+ state = BaseReportState(schedule, datetime.utcnow(), uuid4())
+ state._report_schedule = schedule
+ mocker.patch.object(
+ state,
+ "_get_screenshots",
+ side_effect=ReportScheduleScreenshotFailedError("boom"),
+ )
+ with pytest.raises(ReportScheduleCsvFailedError, match="query context"):
+ state._update_query_context()
+
+
+def test_update_query_context_wraps_screenshot_timeout(mocker: MockerFixture) -> None:
+ """_update_query_context wraps ScreenshotTimeout as CsvFailedError."""
+ schedule = mocker.Mock(spec=ReportSchedule)
+ state = BaseReportState(schedule, datetime.utcnow(), uuid4())
+ state._report_schedule = schedule
+ mocker.patch.object(
+ state,
+ "_get_screenshots",
+ side_effect=ReportScheduleScreenshotTimeout(),
+ )
+ with pytest.raises(ReportScheduleCsvFailedError, match="query context"):
+ state._update_query_context()
+
+
+def test_create_log_stale_data_raises_unexpected_error(mocker: MockerFixture) -> None:
+ """StaleDataError during create_log should rollback and raise UnexpectedError."""
+ from sqlalchemy.orm.exc import StaleDataError
+
+ schedule = mocker.Mock(spec=ReportSchedule)
+ schedule.last_value = None
+ schedule.last_value_row_json = None
+ schedule.last_state = ReportState.WORKING
+
+ state = BaseReportState(schedule, datetime.utcnow(), uuid4())
+ state._report_schedule = schedule
+
+ mock_db = mocker.patch("superset.commands.report.execute.db")
+ mock_db.session.commit.side_effect = StaleDataError("stale")
+ # Prevent SQLAlchemy from inspecting the mock schedule during log creation
+ mocker.patch(
+ "superset.commands.report.execute.ReportExecutionLog",
+ return_value=mocker.Mock(),
+ )
+
+ with pytest.raises(ReportScheduleUnexpectedError):
+ state.create_log()
+ mock_db.session.rollback.assert_called_once()
+
+
+# ---------------------------------------------------------------------------
+# Tier 2: _get_notification_content branches
+# ---------------------------------------------------------------------------
+
+
+def _make_notification_state(
+ mocker: MockerFixture,
+ *,
+ report_format: ReportDataFormat = ReportDataFormat.PNG,
+ schedule_type: ReportScheduleType = ReportScheduleType.REPORT,
+ has_chart: bool = True,
+ email_subject: str | None = None,
+ chart_name: str = "My Chart",
+ dashboard_title: str = "My Dashboard",
+) -> BaseReportState:
+ """Build a BaseReportState with a mock schedule for notification tests."""
+ schedule = mocker.Mock(spec=ReportSchedule)
+ schedule.type = schedule_type
+ schedule.report_format = report_format
+ schedule.name = "Test Schedule"
+ schedule.description = "desc"
+ schedule.email_subject = email_subject
+ schedule.force_screenshot = False
+ schedule.recipients = []
+ schedule.owners = []
+
+ if has_chart:
+ schedule.chart = mocker.Mock()
+ schedule.chart.slice_name = chart_name
+ schedule.dashboard = None
+ else:
+ schedule.chart = None
+ schedule.dashboard = mocker.Mock()
+ schedule.dashboard.dashboard_title = dashboard_title
+ schedule.dashboard.uuid = "dash-uuid"
+ schedule.dashboard.id = 1
+
+ schedule.extra = {}
+
+ state = BaseReportState(schedule, datetime.utcnow(), uuid4())
+ state._report_schedule = schedule
+
+ # Stub helpers that _get_notification_content calls
+ mocker.patch.object(state, "_get_log_data", return_value={})
+ mocker.patch.object(state, "_get_url", return_value="http://example.com")
+
+ return state
+
+
+@patch("superset.commands.report.execute.feature_flag_manager")
+def test_get_notification_content_png_screenshot(
+ mock_ff, mocker: MockerFixture
+) -> None:
+ mock_ff.is_feature_enabled.return_value = False
+ state = _make_notification_state(mocker, report_format=ReportDataFormat.PNG)
+ mocker.patch.object(state, "_get_screenshots", return_value=[b"img1", b"img2"])
+
+ content = state._get_notification_content()
+ assert content.screenshots == [b"img1", b"img2"]
+ assert content.text is None
+
+
+@patch("superset.commands.report.execute.feature_flag_manager")
+def test_get_notification_content_png_empty_returns_error(
+ mock_ff, mocker: MockerFixture
+) -> None:
+ mock_ff.is_feature_enabled.return_value = False
+ state = _make_notification_state(mocker, report_format=ReportDataFormat.PNG)
+ mocker.patch.object(state, "_get_screenshots", return_value=[])
+
+ content = state._get_notification_content()
+ assert content.text == "Unexpected missing screenshot"
+
+
+@patch("superset.commands.report.execute.feature_flag_manager")
+def test_get_notification_content_csv_format(mock_ff, mocker: MockerFixture) -> None:
+ mock_ff.is_feature_enabled.return_value = False
+ state = _make_notification_state(
+ mocker, report_format=ReportDataFormat.CSV, has_chart=True
+ )
+ mocker.patch.object(state, "_get_csv_data", return_value=b"col1,col2\n1,2")
+
+ content = state._get_notification_content()
+ assert content.csv == b"col1,col2\n1,2"
+
+
+@patch("superset.commands.report.execute.feature_flag_manager")
+def test_get_notification_content_text_format(mock_ff, mocker: MockerFixture) -> None:
+ import pandas as pd
+
+ mock_ff.is_feature_enabled.return_value = False
+ state = _make_notification_state(
+ mocker, report_format=ReportDataFormat.TEXT, has_chart=True
+ )
+ df = pd.DataFrame({"a": [1]})
+ mocker.patch.object(state, "_get_embedded_data", return_value=df)
+
+ content = state._get_notification_content()
+ assert content.embedded_data is not None
+ assert list(content.embedded_data.columns) == ["a"]
+
+
+@pytest.mark.parametrize(
+ "email_subject,has_chart,expected_name",
+ [
+ ("Custom Subject", True, "Custom Subject"),
+ (None, True, "Test Schedule: My Chart"),
+ (None, False, "Test Schedule: My Dashboard"),
+ ],
+ ids=["email_subject", "chart_name", "dashboard_name"],
+)
+@patch("superset.commands.report.execute.feature_flag_manager")
+def test_get_notification_content_name(
+ mock_ff,
+ mocker: MockerFixture,
+ email_subject: str | None,
+ has_chart: bool,
+ expected_name: str,
+) -> None:
+ """Notification name comes from email_subject, chart, or dashboard."""
+ mock_ff.is_feature_enabled.return_value = False
+ state = _make_notification_state(
+ mocker,
+ report_format=ReportDataFormat.PNG,
+ email_subject=email_subject,
+ has_chart=has_chart,
+ )
+ mocker.patch.object(state, "_get_screenshots", return_value=[b"img"])
+
+ content = state._get_notification_content()
+ assert content.name == expected_name
+
+
+# ---------------------------------------------------------------------------
+# Tier 3: State machine top-level branches
+# ---------------------------------------------------------------------------
+
+
+def _make_state_instance(
+ mocker: MockerFixture,
+ cls: type,
+ *,
+ schedule_type: ReportScheduleType = ReportScheduleType.ALERT,
+ last_state: ReportState = ReportState.NOOP,
+ grace_period: int = 3600,
+ working_timeout: int = 3600,
+) -> BaseReportState:
+ """Create a state-machine state instance with a mocked schedule."""
+ schedule = mocker.Mock(spec=ReportSchedule)
+ schedule.type = schedule_type
+ schedule.last_state = last_state
+ schedule.grace_period = grace_period
+ schedule.working_timeout = working_timeout
+ schedule.last_eval_dttm = datetime.utcnow()
+ schedule.name = "Test"
+ schedule.owners = []
+ schedule.recipients = []
+ schedule.force_screenshot = False
+ schedule.extra = {}
+
+ instance = cls(schedule, datetime.utcnow(), uuid4())
+ instance._report_schedule = schedule
+ return instance
+
+
+def test_working_state_timeout_raises_timeout_error(mocker: MockerFixture) -> None:
+ """Working state past timeout should raise WorkingTimeoutError and log ERROR."""
+ state = _make_state_instance(mocker, ReportWorkingState)
+ mocker.patch.object(state, "is_on_working_timeout", return_value=True)
+
+ mock_log = mocker.Mock()
+ mock_log.end_dttm = datetime.utcnow() - timedelta(hours=2)
+ mocker.patch(
+ "superset.commands.report.execute.ReportScheduleDAO.find_last_entered_working_log",
+ return_value=mock_log,
+ )
+ mocker.patch.object(state, "update_report_schedule_and_log")
+
+ with pytest.raises(ReportScheduleWorkingTimeoutError):
+ state.next()
+
+ state.update_report_schedule_and_log.assert_called_once_with( # type: ignore[attr-defined]
+ ReportState.ERROR,
+ error_message=str(ReportScheduleWorkingTimeoutError()),
+ )
+
+
+def test_working_state_still_working_raises_previous_working(
+ mocker: MockerFixture,
+) -> None:
+ """Working state not yet timed out should raise PreviousWorkingError."""
+ state = _make_state_instance(mocker, ReportWorkingState)
+ mocker.patch.object(state, "is_on_working_timeout", return_value=False)
+ mocker.patch.object(state, "update_report_schedule_and_log")
+
+ with pytest.raises(ReportSchedulePreviousWorkingError):
+ state.next()
+
+ state.update_report_schedule_and_log.assert_called_once_with( # type: ignore[attr-defined]
+ ReportState.WORKING,
+ error_message=str(ReportSchedulePreviousWorkingError()),
+ )
+
+
+def test_success_state_grace_period_returns_without_sending(
+ mocker: MockerFixture,
+) -> None:
+ """Alert in grace period should set GRACE state and not send."""
+ state = _make_state_instance(
+ mocker,
+ ReportSuccessState,
+ schedule_type=ReportScheduleType.ALERT,
+ )
+ mocker.patch.object(state, "is_in_grace_period", return_value=True)
+ mocker.patch.object(state, "update_report_schedule_and_log")
+ mock_send = mocker.patch.object(state, "send")
+
+ state.next()
+
+ mock_send.assert_not_called()
+ state.update_report_schedule_and_log.assert_called_once_with( # type: ignore[attr-defined]
+ ReportState.GRACE,
+ error_message=str(ReportScheduleAlertGracePeriodError()),
+ )
+
+
+def test_not_triggered_error_state_send_failure_logs_error_and_reraises(
+ mocker: MockerFixture,
+) -> None:
+ """When send() fails in NOOP/ERROR state, error should be logged and re-raised."""
+ state = _make_state_instance(
+ mocker,
+ ReportNotTriggeredErrorState,
+ schedule_type=ReportScheduleType.REPORT,
+ )
+ send_error = RuntimeError("send failed")
+ mocker.patch.object(state, "send", side_effect=send_error)
+ mocker.patch.object(state, "update_report_schedule_and_log")
+ mocker.patch.object(state, "is_in_error_grace_period", return_value=True)
+
+ with pytest.raises(RuntimeError, match="send failed"):
+ state.next()
+
+ # Should have logged WORKING, then ERROR
+ calls = state.update_report_schedule_and_log.call_args_list # type: ignore[attr-defined]
+ assert calls[0].args[0] == ReportState.WORKING
+ assert calls[1].args[0] == ReportState.ERROR
+ error_msg = calls[1].kwargs.get("error_message") or (
+ calls[1].args[1] if len(calls[1].args) > 1 else ""
+ )
+ assert "send failed" in error_msg
+
+
+# ---------------------------------------------------------------------------
+# Phase 1 remaining gaps
+# ---------------------------------------------------------------------------
+
+
+def test_get_dashboard_urls_no_state_fallback(
+ mocker: MockerFixture, app: SupersetApp
+) -> None:
+ """No dashboard state in extra -> standard dashboard URL, not permalink."""
+ mock_report_schedule = mocker.Mock(spec=ReportSchedule)
+ mock_report_schedule.chart = False
+ mock_report_schedule.force_screenshot = False
+ mock_report_schedule.extra = {} # no dashboard state
+ mock_report_schedule.dashboard = mocker.Mock()
+ mock_report_schedule.dashboard.uuid = "dash-uuid-123"
+ mock_report_schedule.dashboard.id = 42
+ mock_report_schedule.recipients = []
+
+ state = BaseReportState(mock_report_schedule, "Jan 1", "exec_id")
+ state._report_schedule = mock_report_schedule
+
+ result = state.get_dashboard_urls()
+
+ assert len(result) == 1
+ assert "superset/dashboard/" in result[0]
+ assert "dashboard/p/" not in result[0] # not a permalink
+
+
+def test_success_state_alert_command_error_sends_error_and_reraises(
+ mocker: MockerFixture,
+) -> None:
+ """AlertCommand exception -> send_error + ERROR state with marker."""
+ state = _make_state_instance(
+ mocker, ReportSuccessState, schedule_type=ReportScheduleType.ALERT
+ )
+ mocker.patch.object(state, "is_in_grace_period", return_value=False)
+ mocker.patch.object(state, "update_report_schedule_and_log")
+ mocker.patch.object(state, "send_error")
+ mocker.patch(
+ "superset.commands.report.execute.AlertCommand"
+ ).return_value.run.side_effect = RuntimeError("alert boom")
+
+ with pytest.raises(RuntimeError, match="alert boom"):
+ state.next()
+
+ state.send_error.assert_called_once() # type: ignore[attr-defined]
+ calls = state.update_report_schedule_and_log.call_args_list # type: ignore[attr-defined]
+ # First call: WORKING, second call: ERROR with marker
+ assert calls[0].args[0] == ReportState.WORKING
+ assert calls[1].args[0] == ReportState.ERROR
+ assert (
+ calls[1].kwargs.get("error_message")
+ == REPORT_SCHEDULE_ERROR_NOTIFICATION_MARKER
+ )
+
+
def test_success_state_send_error_logs_and_reraises(
    mocker: MockerFixture,
) -> None:
    """send() exception for REPORT type -> ERROR state + re-raise."""
    state = _make_state_instance(
        mocker, ReportSuccessState, schedule_type=ReportScheduleType.REPORT
    )
    mocker.patch.object(state, "send", side_effect=RuntimeError("send boom"))
    log_mock = mocker.patch.object(state, "update_report_schedule_and_log")

    # The delivery failure must propagate to the caller ...
    with pytest.raises(RuntimeError, match="send boom"):
        state.next()

    # ... and the last recorded state transition must be ERROR.
    assert log_mock.call_args_list[-1].args[0] == ReportState.ERROR
+
+
@patch("superset.commands.report.execute.feature_flag_manager")
def test_get_notification_content_pdf_format(mock_ff, mocker: MockerFixture) -> None:
    """PDF report format branch produces pdf content."""
    mock_ff.is_feature_enabled.return_value = False
    fake_pdf = b"%PDF-fake"
    state = _make_notification_state(mocker, report_format=ReportDataFormat.PDF)
    mocker.patch.object(state, "_get_pdf", return_value=fake_pdf)

    content = state._get_notification_content()

    # The PDF bytes are attached; no inline text is produced for this format.
    assert content.pdf == fake_pdf
    assert content.text is None
+
+
+# ---------------------------------------------------------------------------
+# Phase 1 gap closure: state machine, feature flag, create_log, success path
+# ---------------------------------------------------------------------------
+
+
def test_state_machine_unknown_state_raises_not_found(
    mocker: MockerFixture,
) -> None:
    """State machine raises StateNotFoundError when last_state matches no class."""
    schedule = mocker.Mock(spec=ReportSchedule)
    schedule.last_state = "NONEXISTENT_STATE"  # not in any state class's current_states

    machine = ReportScheduleStateMachine(uuid4(), schedule, datetime.utcnow())

    with pytest.raises(ReportScheduleStateNotFoundError):
        machine.run()
+
+
@patch("superset.commands.report.execute.feature_flag_manager")
def test_get_notification_content_alert_no_flag_skips_attachment(
    mock_ff, mocker: MockerFixture
) -> None:
    """Alert with ALERTS_ATTACH_REPORTS=False skips screenshot/pdf/csv attachment."""
    # Feature flag off: for ALERT schedules the attachment branch must be skipped.
    mock_ff.is_feature_enabled.return_value = False
    state = _make_notification_state(
        mocker,
        report_format=ReportDataFormat.PNG,
        schedule_type=ReportScheduleType.ALERT,
        has_chart=True,
    )
    mock_screenshots = mocker.patch.object(state, "_get_screenshots")

    content = state._get_notification_content()

    # _get_screenshots should NOT be called — the attachment block is skipped
    mock_screenshots.assert_not_called()
    assert content.screenshots == []
    assert content.text is None
+
+
def test_create_log_success_commits(mocker: MockerFixture) -> None:
    """Successful create_log creates a log entry and commits."""
    schedule = mocker.Mock(spec=ReportSchedule)
    schedule.last_value = "42"
    schedule.last_value_row_json = '{"col": 42}'
    schedule.last_state = ReportState.SUCCESS

    state = BaseReportState(schedule, datetime.utcnow(), uuid4())
    state._report_schedule = schedule

    # Patch the session and the log model so no real DB work happens.
    mock_db = mocker.patch("superset.commands.report.execute.db")
    mock_log_cls = mocker.patch(
        "superset.commands.report.execute.ReportExecutionLog",
        return_value=mocker.Mock(),
    )

    state.create_log(error_message=None)

    # One log row built, added, and committed — and never rolled back.
    mock_log_cls.assert_called_once()
    mock_db.session.add.assert_called_once()
    mock_db.session.commit.assert_called_once()
    mock_db.session.rollback.assert_not_called()
+
+
def test_success_state_report_sends_and_logs_success(
    mocker: MockerFixture,
) -> None:
    """REPORT type success path: send() + update state to SUCCESS."""
    state = _make_state_instance(
        mocker,
        ReportSuccessState,
        schedule_type=ReportScheduleType.REPORT,
    )
    send_mock = mocker.patch.object(state, "send")
    log_mock = mocker.patch.object(state, "update_report_schedule_and_log")

    state.next()

    # Exactly one delivery and one SUCCESS transition with no error message.
    send_mock.assert_called_once()
    log_mock.assert_called_once_with(ReportState.SUCCESS, error_message=None)
diff --git a/tests/unit_tests/commands/report/update_test.py b/tests/unit_tests/commands/report/update_test.py
index 6b515781b4d..32be92073c4 100644
--- a/tests/unit_tests/commands/report/update_test.py
+++ b/tests/unit_tests/commands/report/update_test.py
@@ -24,10 +24,13 @@ import pytest
from pytest_mock import MockerFixture
from superset.commands.report.exceptions import (
+ ReportScheduleForbiddenError,
ReportScheduleInvalidError,
)
from superset.commands.report.update import UpdateReportScheduleCommand
-from superset.reports.models import ReportScheduleType
+from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
+from superset.exceptions import SupersetSecurityException
+from superset.reports.models import ReportScheduleType, ReportState
def _make_model(
@@ -252,3 +255,71 @@ def test_report_to_alert_with_db_accepted(mocker: MockerFixture) -> None:
data={"type": ReportScheduleType.ALERT, "database": 5},
)
cmd.validate() # should not raise
+
+
+# --- Deactivation state reset ---
+
+
def test_deactivation_resets_working_state_to_noop(mocker: MockerFixture) -> None:
    """Deactivating a report in WORKING state should reset last_state to NOOP."""
    model = _make_model(mocker, model_type=ReportScheduleType.REPORT, database_id=None)
    model.last_state = ReportState.WORKING
    _setup_mocks(mocker, model)

    command = UpdateReportScheduleCommand(model_id=1, data={"active": False})
    command.validate()

    # Validation injects the state reset into the update payload.
    assert command._properties["last_state"] == ReportState.NOOP


def test_deactivation_from_non_working_does_not_reset(mocker: MockerFixture) -> None:
    """Deactivating a report NOT in WORKING state should not touch last_state."""
    model = _make_model(mocker, model_type=ReportScheduleType.REPORT, database_id=None)
    model.last_state = ReportState.SUCCESS
    _setup_mocks(mocker, model)

    command = UpdateReportScheduleCommand(model_id=1, data={"active": False})
    command.validate()

    # last_state must be left alone for non-WORKING schedules.
    assert "last_state" not in command._properties
+
+
+# --- Ownership check ---
+
+
def test_ownership_check_raises_forbidden(mocker: MockerFixture) -> None:
    """Non-owner should get ReportScheduleForbiddenError."""
    model = _make_model(mocker, model_type=ReportScheduleType.REPORT, database_id=None)
    _setup_mocks(mocker, model)
    # raise_for_ownership rejects the current user; the command must translate
    # the security exception into its own Forbidden error type.
    mocker.patch(
        "superset.commands.report.update.security_manager.raise_for_ownership",
        side_effect=SupersetSecurityException(
            SupersetError(
                message="Forbidden",
                error_type=SupersetErrorType.GENERIC_BACKEND_ERROR,
                level=ErrorLevel.ERROR,
            )
        ),
    )

    cmd = UpdateReportScheduleCommand(model_id=1, data={})
    with pytest.raises(ReportScheduleForbiddenError):
        cmd.validate()
+
+
+# --- Database not found for alert ---
+
+
def test_alert_with_nonexistent_database_rejected(mocker: MockerFixture) -> None:
    """Alert with a database ID that doesn't exist should fail validation."""
    model = _make_model(mocker, model_type=ReportScheduleType.ALERT, database_id=None)
    _setup_mocks(mocker, model)
    # The DAO lookup comes back empty for the requested database ID.
    mocker.patch(
        "superset.commands.report.update.DatabaseDAO.find_by_id",
        return_value=None,
    )

    command = UpdateReportScheduleCommand(model_id=1, data={"database": 99999})

    with pytest.raises(ReportScheduleInvalidError) as exc_info:
        command.validate()

    errors = _get_validation_messages(exc_info)
    assert "database" in errors
    assert "does not exist" in errors["database"].lower()
diff --git a/tests/unit_tests/reports/api_test.py b/tests/unit_tests/reports/api_test.py
new file mode 100644
index 00000000000..6656b623bb1
--- /dev/null
+++ b/tests/unit_tests/reports/api_test.py
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from typing import Any
+from unittest.mock import patch
+
+import prison
+
+from superset.exceptions import SupersetException
+from tests.unit_tests.conftest import with_feature_flags
+
+
@with_feature_flags(ALERT_REPORTS=True)
@patch("superset.reports.api.get_channels_with_search")
def test_slack_channels_success(
    mock_search: Any,
    client: Any,
    full_api_access: None,
) -> None:
    """The endpoint returns the channel list from the Slack search helper."""
    channels = [{"id": "C123", "name": "general"}]
    mock_search.return_value = channels
    response = client.get(f"/api/v1/report/slack_channels/?q={prison.dumps({})}")
    assert response.status_code == 200
    assert response.json["result"] == channels


@with_feature_flags(ALERT_REPORTS=True)
@patch("superset.reports.api.get_channels_with_search")
def test_slack_channels_handles_superset_exception(
    mock_search: Any,
    client: Any,
    full_api_access: None,
) -> None:
    """Upstream Slack failures surface as a 422 carrying the original message."""
    mock_search.side_effect = SupersetException("Slack API error")
    response = client.get(f"/api/v1/report/slack_channels/?q={prison.dumps({})}")
    assert response.status_code == 422
    assert "Slack API error" in response.json["message"]
diff --git a/tests/unit_tests/reports/dao_test.py b/tests/unit_tests/reports/dao_test.py
new file mode 100644
index 00000000000..d77c42a9fd7
--- /dev/null
+++ b/tests/unit_tests/reports/dao_test.py
@@ -0,0 +1,91 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from unittest.mock import MagicMock, patch
+
+
@patch("superset.daos.report.get_user_id", return_value=1)
@patch("superset.daos.report.db")
def test_validate_unique_creation_method_duplicate_returns_false(
    mock_db: MagicMock,
    mock_uid: MagicMock,
) -> None:
    """An existing schedule for the same dashboard/user makes the check fail."""
    from superset.daos.report import ReportScheduleDAO

    # Simulate that a matching report already exists
    mock_db.session.query.return_value.filter_by.return_value.filter.return_value = (
        MagicMock()
    )
    # scalar() answers the existence query: True -> duplicate present.
    mock_db.session.query.return_value.scalar.return_value = True
    assert ReportScheduleDAO.validate_unique_creation_method(dashboard_id=1) is False


@patch("superset.daos.report.get_user_id", return_value=1)
@patch("superset.daos.report.db")
def test_validate_unique_creation_method_no_duplicate_returns_true(
    mock_db: MagicMock,
    mock_uid: MagicMock,
) -> None:
    """No matching schedule in the DB -> the creation method is unique."""
    from superset.daos.report import ReportScheduleDAO

    mock_db.session.query.return_value.filter_by.return_value.filter.return_value = (
        MagicMock()
    )
    # scalar() returns False: no duplicate row found.
    mock_db.session.query.return_value.scalar.return_value = False
    assert ReportScheduleDAO.validate_unique_creation_method(dashboard_id=1) is True
+
+
@patch("superset.daos.report.db")
def test_find_last_error_notification_returns_none_after_success(
    mock_db: MagicMock,
) -> None:
    """A success log newer than the last error marker suppresses re-notification."""
    from superset.daos.report import ReportScheduleDAO

    schedule = MagicMock()
    error_log = MagicMock()
    success_log = MagicMock()

    # Build the query chain so each .query().filter().order_by().first() call
    # returns a different result. The DAO calls db.session.query() twice:
    # 1st call finds the error marker log
    # 2nd call finds a non-error log after it (success happened since last error email)
    query_mock = MagicMock()
    mock_db.session.query.return_value = query_mock
    chain = query_mock.filter.return_value.order_by.return_value
    chain.first.side_effect = [error_log, success_log]

    result = ReportScheduleDAO.find_last_error_notification(schedule)
    # Success log exists after error → should return None (no re-notification needed)
    assert result is None


@patch("superset.daos.report.db")
def test_find_last_error_notification_returns_log_when_only_errors(
    mock_db: MagicMock,
) -> None:
    """With no success since the error marker, the marker log itself is returned."""
    from superset.daos.report import ReportScheduleDAO

    schedule = MagicMock()
    error_log = MagicMock()

    query_mock = MagicMock()
    mock_db.session.query.return_value = query_mock
    chain = query_mock.filter.return_value.order_by.return_value
    # 1st call: error marker log found; 2nd call: no success log after it
    chain.first.side_effect = [error_log, None]

    result = ReportScheduleDAO.find_last_error_notification(schedule)
    assert result is error_log
diff --git a/tests/unit_tests/reports/filters_test.py b/tests/unit_tests/reports/filters_test.py
new file mode 100644
index 00000000000..308eb439136
--- /dev/null
+++ b/tests/unit_tests/reports/filters_test.py
@@ -0,0 +1,64 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from unittest.mock import MagicMock, patch
+
+
@patch("superset.reports.filters.security_manager", new_callable=MagicMock)
def test_report_schedule_filter_admin_sees_all(mock_sm: MagicMock) -> None:
    """Admins bypass filtering entirely: same query object, untouched."""
    from superset.reports.filters import ReportScheduleFilter

    mock_sm.can_access_all_datasources.return_value = True
    query = MagicMock()
    schedule_filter = ReportScheduleFilter("id", MagicMock())
    assert schedule_filter.apply(query, None) is query
    query.filter.assert_not_called()


@patch("superset.reports.filters.security_manager", new_callable=MagicMock)
@patch("superset.reports.filters.db")
def test_report_schedule_filter_non_admin_filtered(
    mock_db: MagicMock, mock_sm: MagicMock
) -> None:
    """Non-admins get exactly one visibility filter applied to the query."""
    from superset.reports.filters import ReportScheduleFilter

    mock_sm.can_access_all_datasources.return_value = False
    mock_sm.user_model.get_user_id.return_value = 1
    mock_sm.user_model.id = 1
    query = MagicMock()
    schedule_filter = ReportScheduleFilter("id", MagicMock())
    schedule_filter.apply(query, None)
    query.filter.assert_called_once()


def test_report_schedule_all_text_filter_empty_noop() -> None:
    """An empty search string must leave the query untouched."""
    from superset.reports.filters import ReportScheduleAllTextFilter

    query = MagicMock()
    text_filter = ReportScheduleAllTextFilter("name", MagicMock())
    assert text_filter.apply(query, "") is query
    query.filter.assert_not_called()


def test_report_schedule_all_text_filter_applies_ilike() -> None:
    """A non-empty search value triggers exactly one filter call."""
    from superset.reports.filters import ReportScheduleAllTextFilter

    query = MagicMock()
    text_filter = ReportScheduleAllTextFilter("name", MagicMock())
    text_filter.apply(query, "test")
    query.filter.assert_called_once()
diff --git a/tests/unit_tests/reports/model_test.py b/tests/unit_tests/reports/model_test.py
index 33ad3461dbf..24158b65d5d 100644
--- a/tests/unit_tests/reports/model_test.py
+++ b/tests/unit_tests/reports/model_test.py
@@ -323,6 +323,124 @@ def test_report_generate_native_filter_no_column_name():
assert warning is None
def test_report_generate_native_filter_select_null_column():
    """A None column name falls back to empty strings for col and label."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F1", "filter_select", None, ["US"]
    )
    entry = result["F1"]
    assert entry["extraFormData"]["filters"][0]["col"] == ""
    assert entry["filterState"]["label"] == ""
    assert warning is None


def test_generate_native_filter_time_normal():
    """filter_time maps its single value onto time_range and filterState."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F2", "filter_time", "ignored", ["Last week"]
    )
    expected = {
        "F2": {
            "id": "F2",
            "extraFormData": {"time_range": "Last week"},
            "filterState": {"value": "Last week"},
            "ownState": {},
        }
    }
    assert result == expected
    assert warning is None


def test_generate_native_filter_timegrain_normal():
    """filter_timegrain maps its values onto time_grain_sqla."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F3", "filter_timegrain", "ignored", ["P1D"]
    )
    expected = {
        "F3": {
            "id": "F3",
            "extraFormData": {"time_grain_sqla": "P1D"},
            "filterState": {"value": ["P1D"]},
            "ownState": {},
        }
    }
    assert result == expected
    assert warning is None


def test_generate_native_filter_timecolumn_normal():
    """filter_timecolumn is the only branch missing 'id' in its output."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F4", "filter_timecolumn", "ignored", ["ds"]
    )
    expected = {
        "F4": {
            "extraFormData": {"granularity_sqla": "ds"},
            "filterState": {"value": ["ds"]},
        }
    }
    assert result == expected
    assert "id" not in result["F4"]
    assert warning is None
+
+
def test_generate_native_filter_range_normal():
    """Both bounds present -> two filters and a two-sided label."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", "price", [10, 100]
    )
    expected = {
        "F5": {
            "id": "F5",
            "extraFormData": {
                "filters": [
                    {"col": "price", "op": ">=", "val": 10},
                    {"col": "price", "op": "<=", "val": 100},
                ]
            },
            "filterState": {
                "value": [10, 100],
                "label": "10 ≤ x ≤ 100",
            },
            "ownState": {},
        }
    }
    assert result == expected
    assert warning is None


def test_generate_native_filter_range_min_only():
    """Only a lower bound -> single >= filter and an 'x ≥' label."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", "price", [10]
    )
    entry = result["F5"]
    assert entry["extraFormData"]["filters"] == [
        {"col": "price", "op": ">=", "val": 10}
    ]
    assert entry["filterState"]["label"] == "x ≥ 10"
    assert entry["filterState"]["value"] == [10, None]
    assert warning is None


def test_generate_native_filter_range_max_only():
    """Only an upper bound -> single <= filter and an 'x ≤' label."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", "price", [None, 100]
    )
    entry = result["F5"]
    assert entry["extraFormData"]["filters"] == [
        {"col": "price", "op": "<=", "val": 100}
    ]
    assert entry["filterState"]["label"] == "x ≤ 100"
    assert warning is None


def test_generate_native_filter_range_empty_values():
    """No values at all -> empty filters, empty label, [None, None] value."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", "price", []
    )
    entry = result["F5"]
    assert entry["extraFormData"]["filters"] == []
    assert entry["filterState"]["label"] == ""
    assert entry["filterState"]["value"] == [None, None]
    assert warning is None
+
+
def test_report_generate_native_filter_unknown_filter_type():
"""
Test the ``_generate_native_filter`` method with an unknown filter type.
@@ -344,6 +462,34 @@ def test_report_generate_native_filter_unknown_filter_type():
assert "filter_id" in warning
def test_get_native_filters_params_null_native_filters():
    """A null nativeFilters list yields empty rison and no warnings."""
    schedule = ReportSchedule()
    schedule.extra = {"dashboard": {"nativeFilters": None}}
    params, warnings = schedule.get_native_filters_params()
    assert params == "()"
    assert warnings == []


def test_get_native_filters_params_rison_quote_escaping():
    """Single quotes in filter values are percent-encoded in the rison output."""
    schedule = ReportSchedule()
    schedule.extra = {
        "dashboard": {
            "nativeFilters": [
                {
                    "nativeFilterId": "F1",
                    "filterType": "filter_select",
                    "columnName": "name",
                    "filterValues": ["O'Brien"],
                }
            ]
        }
    }
    params, warnings = schedule.get_native_filters_params()
    # The raw quote never leaks through; it is emitted as %27.
    assert "'" not in params
    assert "%27" in params
    assert warnings == []
+
+
def test_get_native_filters_params_unknown_filter_type():
"""
Test the ``get_native_filters_params`` method with an unknown filter type.
@@ -378,3 +524,135 @@ def test_get_native_filters_params_unknown_filter_type():
assert len(warnings) == 1
assert "unrecognized filter type" in warnings[0]
assert "filter_unknown_type" in warnings[0]
+
+
def test_get_native_filters_params_missing_filter_id_key():
    """A filter entry without nativeFilterId is skipped with a warning."""
    schedule = ReportSchedule()
    schedule.extra = {
        "dashboard": {
            "nativeFilters": [
                {
                    # "nativeFilterId" deliberately absent — the defensive
                    # guard should drop this entry instead of crashing.
                    "filterType": "filter_select",
                    "columnName": "col",
                    "filterValues": ["v"],
                }
            ]
        }
    }
    params, warnings = schedule.get_native_filters_params()
    assert params == "()"
    assert len(warnings) == 1
    assert "Skipping malformed native filter" in warnings[0]
+
+
def test_generate_native_filter_empty_filter_id():
    """Empty native_filter_id triggers the ``or ""`` fallback branches."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "", "filter_select", "col", ["x"]
    )
    # The empty string is still used as the entry key and its own id.
    assert "" in result
    assert result[""]["id"] == ""
    assert warning is None
+
+
def test_generate_native_filter_range_zero_min():
    """Zero min_val should produce a two-sided label, not a max-only label."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", "price", [0, 100]
    )
    entry = result["F5"]
    assert entry["extraFormData"]["filters"] == [
        {"col": "price", "op": ">=", "val": 0},
        {"col": "price", "op": "<=", "val": 100},
    ]
    # Label generation must treat zero as a valid bound, not falsy/absent.
    assert entry["filterState"]["label"] == "0 ≤ x ≤ 100"


def test_generate_native_filter_range_zero_max():
    """Zero max_val should produce a two-sided label, not a min-only label."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", "price", [10, 0]
    )
    entry = result["F5"]
    assert entry["extraFormData"]["filters"] == [
        {"col": "price", "op": ">=", "val": 10},
        {"col": "price", "op": "<=", "val": 0},
    ]
    assert entry["filterState"]["label"] == "10 ≤ x ≤ 0"


def test_generate_native_filter_range_both_zero():
    """Both values zero should produce a two-sided label, not an empty string."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", "price", [0, 0]
    )
    entry = result["F5"]
    assert entry["extraFormData"]["filters"] == [
        {"col": "price", "op": ">=", "val": 0},
        {"col": "price", "op": "<=", "val": 0},
    ]
    assert entry["filterState"]["label"] == "0 ≤ x ≤ 0"
+
+
def test_get_native_filters_params_missing_filter_type():
    """Missing filterType skips the filter and emits a warning."""
    schedule = ReportSchedule()
    schedule.extra = {
        "dashboard": {
            "nativeFilters": [
                {
                    # "filterType" deliberately absent
                    "nativeFilterId": "F1",
                    "columnName": "col",
                    "filterValues": ["v"],
                }
            ]
        }
    }
    params, warnings = schedule.get_native_filters_params()
    assert params == "()"
    assert len(warnings) == 1
    assert "Skipping malformed native filter" in warnings[0]


def test_get_native_filters_params_missing_column_name():
    """Missing columnName defaults to empty string via .get() fallback."""
    schedule = ReportSchedule()
    schedule.extra = {
        "dashboard": {
            "nativeFilters": [
                {
                    "nativeFilterId": "F1",
                    "filterType": "filter_select",
                    "filterValues": ["v"],
                }
            ]
        }
    }
    params, warnings = schedule.get_native_filters_params()
    # The filter still renders under its ID and no warning is produced.
    assert "F1" in params
    assert warnings == []


def test_generate_native_filter_range_null_column():
    """Range filter with None column_name falls back to empty string."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", None, [10, 100]
    )
    generated = result["F5"]["extraFormData"]["filters"]
    assert generated[0]["col"] == ""
    assert generated[1]["col"] == ""
    assert warning is None


def test_generate_native_filter_time_empty_id():
    """Empty string filter ID for filter_time uses the ``or ""`` fallback."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "", "filter_time", "ignored", ["Last week"]
    )
    assert "" in result
    assert result[""]["id"] == ""
    assert warning is None
diff --git a/tests/unit_tests/reports/notifications/slack_tests.py b/tests/unit_tests/reports/notifications/slack_tests.py
index 1457544ec7e..d1a1a99d95e 100644
--- a/tests/unit_tests/reports/notifications/slack_tests.py
+++ b/tests/unit_tests/reports/notifications/slack_tests.py
@@ -377,3 +377,57 @@ def test_send_slack_no_feature_flag(
```
""",
)
+
+
@patch("superset.reports.notifications.slackv2.g")
@patch("superset.reports.notifications.slackv2.get_slack_client")
def test_slackv2_send_without_channels_raises(
    slack_client_mock: MagicMock,
    flask_global_mock: MagicMock,
    mock_header_data,
) -> None:
    """An empty recipient target fails fast with NotificationParamException."""
    from superset.reports.models import ReportRecipients, ReportRecipientType
    from superset.reports.notifications.base import NotificationContent
    from superset.reports.notifications.exceptions import NotificationParamException

    flask_global_mock.logs_context = {}
    content = NotificationContent(name="test", header_data=mock_header_data)
    notification = SlackV2Notification(
        recipient=ReportRecipients(
            type=ReportRecipientType.SLACKV2,
            recipient_config_json='{"target": ""}',
        ),
        content=content,
    )
    with pytest.raises(NotificationParamException, match="No recipients"):
        notification.send()


@patch("superset.reports.notifications.slackv2.g")
@patch("superset.reports.notifications.slackv2.get_slack_client")
def test_slack_mixin_get_body_truncates_large_table(
    slack_client_mock: MagicMock,
    flask_global_mock: MagicMock,
    mock_header_data,
) -> None:
    """Oversized embedded tables are truncated with an explicit marker."""
    from superset.reports.models import ReportRecipients, ReportRecipientType
    from superset.reports.notifications.base import NotificationContent

    flask_global_mock.logs_context = {}
    # Create a large DataFrame that exceeds the 4000-char message limit
    large_df = pd.DataFrame({"col_" + str(i): range(100) for i in range(10)})
    content = NotificationContent(
        name="test",
        header_data=mock_header_data,
        embedded_data=large_df,
        description="desc",
    )
    notification = SlackV2Notification(
        recipient=ReportRecipients(
            type=ReportRecipientType.SLACKV2,
            recipient_config_json='{"target": "some_channel"}',
        ),
        content=content,
    )
    body = notification._get_body(content=content)
    # The marker proves the mixin clipped the table instead of emitting an
    # over-limit Slack message.
    assert "(table was truncated)" in body
diff --git a/tests/unit_tests/reports/schemas_test.py b/tests/unit_tests/reports/schemas_test.py
index cf72149070b..49e7fdfe649 100644
--- a/tests/unit_tests/reports/schemas_test.py
+++ b/tests/unit_tests/reports/schemas_test.py
@@ -19,7 +19,7 @@ import pytest
from marshmallow import ValidationError
from pytest_mock import MockerFixture
-from superset.reports.schemas import ReportSchedulePostSchema
+from superset.reports.schemas import ReportSchedulePostSchema, ReportSchedulePutSchema
def test_report_post_schema_custom_width_validation(mocker: MockerFixture) -> None:
@@ -75,3 +75,184 @@ def test_report_post_schema_custom_width_validation(mocker: MockerFixture) -> No
assert excinfo.value.messages == {
"custom_width": ["Screenshot width must be between 100px and 200px"]
}
+
+
+MINIMAL_POST_PAYLOAD = {
+ "type": "Report",
+ "name": "A report",
+ "crontab": "* * * * *",
+ "timezone": "America/Los_Angeles",
+}
+
+CUSTOM_WIDTH_CONFIG = {
+ "ALERT_REPORTS_MIN_CUSTOM_SCREENSHOT_WIDTH": 600,
+ "ALERT_REPORTS_MAX_CUSTOM_SCREENSHOT_WIDTH": 2400,
+}
+
+
+@pytest.mark.parametrize(
+ "schema_class,payload_base",
+ [
+ (ReportSchedulePostSchema, MINIMAL_POST_PAYLOAD),
+ (ReportSchedulePutSchema, {}),
+ ],
+ ids=["post", "put"],
+)
+@pytest.mark.parametrize(
+ "width,should_pass",
+ [
+ (599, False),
+ (600, True),
+ (2400, True),
+ (2401, False),
+ (None, True),
+ ],
+)
+def test_custom_width_boundary_values(
+ mocker: MockerFixture,
+ schema_class: type,
+ payload_base: dict[str, object],
+ width: int | None,
+ should_pass: bool,
+) -> None:
+ mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+ schema = schema_class()
+ payload = {**payload_base, "custom_width": width}
+
+ if should_pass:
+ schema.load(payload)
+ else:
+ with pytest.raises(ValidationError) as exc:
+ schema.load(payload)
+ assert "custom_width" in exc.value.messages
+
+
+def test_working_timeout_validation(mocker: MockerFixture) -> None:
+ mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+ post_schema = ReportSchedulePostSchema()
+ put_schema = ReportSchedulePutSchema()
+
+ # POST: working_timeout=0 and -1 are invalid (min=1)
+ with pytest.raises(ValidationError) as exc:
+ post_schema.load({**MINIMAL_POST_PAYLOAD, "working_timeout": 0})
+ assert "working_timeout" in exc.value.messages
+
+ with pytest.raises(ValidationError) as exc:
+ post_schema.load({**MINIMAL_POST_PAYLOAD, "working_timeout": -1})
+ assert "working_timeout" in exc.value.messages
+
+ # POST: working_timeout=1 is valid
+ post_schema.load({**MINIMAL_POST_PAYLOAD, "working_timeout": 1})
+
+ # PUT: working_timeout=None is valid (allow_none=True)
+ put_schema.load({"working_timeout": None})
+
+
+def test_log_retention_post_vs_put_parity(mocker: MockerFixture) -> None:
+ mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+ post_schema = ReportSchedulePostSchema()
+ put_schema = ReportSchedulePutSchema()
+
+ # POST: log_retention=0 is invalid (min=1)
+ with pytest.raises(ValidationError) as exc:
+ post_schema.load({**MINIMAL_POST_PAYLOAD, "log_retention": 0})
+ assert "log_retention" in exc.value.messages
+
+ # POST: log_retention=1 is valid
+ post_schema.load({**MINIMAL_POST_PAYLOAD, "log_retention": 1})
+
+ # PUT: log_retention=0 is valid (min=0)
+ put_schema.load({"log_retention": 0})
+
+
+def test_report_type_disallows_database(mocker: MockerFixture) -> None:
+ mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+ schema = ReportSchedulePostSchema()
+
+ with pytest.raises(ValidationError) as exc:
+ schema.load({**MINIMAL_POST_PAYLOAD, "database": 1})
+ assert "database" in exc.value.messages
+
+
+def test_alert_type_allows_database(mocker: MockerFixture) -> None:
+ """Alert type should accept database; only Report type blocks it."""
+ mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+ schema = ReportSchedulePostSchema()
+ result = schema.load({**MINIMAL_POST_PAYLOAD, "type": "Alert", "database": 1})
+ assert result["database"] == 1
+
+
+# ---------------------------------------------------------------------------
+# Additional schema coverage: crontab validation, name length limits, POST/PUT parity
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.parametrize(
+ "crontab,should_pass",
+ [
+ ("* * * * *", True),
+ ("0 0 * * 0", True),
+ ("*/5 * * * *", True),
+ ("not a cron", False),
+ ("* * * *", False), # too few fields
+ ("", False),
+ ],
+ ids=["every-min", "weekly", "every-5", "invalid-text", "too-few-fields", "empty"],
+)
+def test_crontab_validation(
+ mocker: MockerFixture,
+ crontab: str,
+ should_pass: bool,
+) -> None:
+ mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+ schema = ReportSchedulePostSchema()
+ payload = {**MINIMAL_POST_PAYLOAD, "crontab": crontab}
+
+ if should_pass:
+ result = schema.load(payload)
+ assert result["crontab"] == crontab
+ else:
+ with pytest.raises(ValidationError) as exc:
+ schema.load(payload)
+ assert "crontab" in exc.value.messages
+
+
+def test_name_empty_rejected(mocker: MockerFixture) -> None:
+ mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+ schema = ReportSchedulePostSchema()
+
+ with pytest.raises(ValidationError) as exc:
+ schema.load({**MINIMAL_POST_PAYLOAD, "name": ""})
+ assert "name" in exc.value.messages
+
+
+def test_name_at_max_length_accepted(mocker: MockerFixture) -> None:
+ mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+ schema = ReportSchedulePostSchema()
+ long_name = "x" * 150
+ result = schema.load({**MINIMAL_POST_PAYLOAD, "name": long_name})
+ assert result["name"] == long_name
+
+
+def test_name_over_max_length_rejected(mocker: MockerFixture) -> None:
+ mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+ schema = ReportSchedulePostSchema()
+
+ with pytest.raises(ValidationError) as exc:
+ schema.load({**MINIMAL_POST_PAYLOAD, "name": "x" * 151})
+ assert "name" in exc.value.messages
+
+
+def test_put_schema_allows_database_on_report_type(mocker: MockerFixture) -> None:
+ """PUT schema lacks validate_report_references — database on Report type is
+ accepted (documents current behavior; POST schema correctly rejects this)."""
+ mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+ put_schema = ReportSchedulePutSchema()
+ result = put_schema.load({"type": "Report", "database": 1})
+ assert result["database"] == 1
+
+ # POST schema rejects it (verify the asymmetry)
+ post_schema = ReportSchedulePostSchema()
+ with pytest.raises(ValidationError) as exc:
+ post_schema.load({**MINIMAL_POST_PAYLOAD, "database": 1})
+ assert "database" in exc.value.messages