test(alerts/reports): close backend and frontend test coverage gaps (#38591)

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Joe Li
2026-04-02 11:55:24 -07:00
committed by GitHub
parent f0fcdcc76a
commit 9e27d682f6
20 changed files with 4235 additions and 381 deletions

View File

@@ -0,0 +1,222 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any
from unittest.mock import MagicMock
import pytest
from marshmallow import ValidationError
from pytest_mock import MockerFixture
from superset.commands.report.create import CreateReportScheduleCommand
from superset.commands.report.exceptions import (
DatabaseNotFoundValidationError,
ReportScheduleAlertRequiredDatabaseValidationError,
ReportScheduleInvalidError,
)
from superset.reports.models import ReportScheduleType
from superset.utils import json
def _make_dashboard(position: dict[str, Any]) -> MagicMock:
dashboard = MagicMock()
dashboard.position_json = json.dumps(position)
return dashboard
def test_validate_report_extra_null_extra() -> None:
    """A null ``extra`` payload produces no validation errors."""
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {"extra": None}
    errors: list[ValidationError] = []
    cmd._validate_report_extra(errors)
    assert not errors
def test_validate_report_extra_null_dashboard() -> None:
    """Dashboard extra data with no dashboard object raises nothing."""
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {"extra": {"dashboard": {}}, "dashboard": None}
    errors: list[ValidationError] = []
    cmd._validate_report_extra(errors)
    assert not errors
def test_validate_report_extra_empty_active_tabs() -> None:
    """An empty ``activeTabs`` list passes validation."""
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {
        "extra": {"dashboard": {"activeTabs": []}},
        "dashboard": _make_dashboard({"TAB-1": {}, "TAB-2": {}}),
    }
    errors: list[ValidationError] = []
    cmd._validate_report_extra(errors)
    assert not errors
def test_validate_report_extra_valid_tabs() -> None:
    """Active tabs that exist in the dashboard layout are accepted."""
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {
        "extra": {"dashboard": {"activeTabs": ["TAB-1"]}},
        "dashboard": _make_dashboard({"TAB-1": {}, "TAB-2": {}}),
    }
    errors: list[ValidationError] = []
    cmd._validate_report_extra(errors)
    assert not errors
def test_validate_report_extra_invalid_tabs() -> None:
    """An active tab missing from the layout yields one 'extra' error."""
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {
        "extra": {"dashboard": {"activeTabs": ["TAB-999"]}},
        "dashboard": _make_dashboard({"TAB-1": {}}),
    }
    errors: list[ValidationError] = []
    cmd._validate_report_extra(errors)
    assert len(errors) == 1
    assert errors[0].field_name == "extra"
def test_validate_report_extra_anchor_json_valid() -> None:
    """A JSON-list anchor whose IDs all exist passes validation."""
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {
        "extra": {"dashboard": {"anchor": '["TAB-1"]'}},
        "dashboard": _make_dashboard({"TAB-1": {}}),
    }
    errors: list[ValidationError] = []
    cmd._validate_report_extra(errors)
    assert not errors
def test_validate_report_extra_anchor_invalid_ids() -> None:
    """A JSON-list anchor with an unknown ID yields one 'extra' error."""
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {
        "extra": {"dashboard": {"anchor": '["TAB-999"]'}},
        "dashboard": _make_dashboard({"TAB-1": {}}),
    }
    errors: list[ValidationError] = []
    cmd._validate_report_extra(errors)
    assert len(errors) == 1
    assert errors[0].field_name == "extra"
def test_validate_report_extra_anchor_string_valid() -> None:
    """A plain-string anchor matching a layout ID passes validation."""
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {
        "extra": {"dashboard": {"anchor": "TAB-1"}},
        "dashboard": _make_dashboard({"TAB-1": {}}),
    }
    errors: list[ValidationError] = []
    cmd._validate_report_extra(errors)
    assert not errors
def test_validate_report_extra_anchor_string_invalid() -> None:
    """A plain-string anchor with no matching layout ID yields one error."""
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {
        "extra": {"dashboard": {"anchor": "TAB-999"}},
        "dashboard": _make_dashboard({"TAB-1": {}}),
    }
    errors: list[ValidationError] = []
    cmd._validate_report_extra(errors)
    assert len(errors) == 1
    assert errors[0].field_name == "extra"
# ---------------------------------------------------------------------------
# Phase 1 gap closure: validate() — alert + database combos
# ---------------------------------------------------------------------------
def _stub_validate_deps(mocker: MockerFixture) -> None:
    """Patch every DAO/base-class collaborator used inside ``validate()``.

    This lets each test drive exactly one validation branch in isolation.
    """
    # Base-class helpers that would otherwise hit the DB or metastore.
    for method_name in (
        "_populate_recipients",
        "validate_report_frequency",
        "validate_chart_dashboard",
        "_validate_report_extra",
    ):
        mocker.patch.object(CreateReportScheduleCommand, method_name)
    mocker.patch.object(
        CreateReportScheduleCommand, "populate_owners", return_value=[]
    )
    # DAO uniqueness checks always succeed.
    mocker.patch(
        "superset.commands.report.create.ReportScheduleDAO.validate_update_uniqueness",
        return_value=True,
    )
    mocker.patch(
        "superset.commands.report.create.ReportScheduleDAO"
        ".validate_unique_creation_method",
        return_value=True,
    )
def test_validate_alert_missing_database_key(mocker: MockerFixture) -> None:
    """An alert without a 'database' key raises the required-database error."""
    _stub_validate_deps(mocker)
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {
        "type": ReportScheduleType.ALERT,
        "name": "Test Alert",
        "crontab": "* * * * *",
        "creation_method": "alerts_reports",
    }
    with pytest.raises(ReportScheduleInvalidError) as exc_info:
        cmd.validate()
    assert any(
        isinstance(err, ReportScheduleAlertRequiredDatabaseValidationError)
        for err in exc_info.value._exceptions
    )
def test_validate_alert_nonexistent_database(mocker: MockerFixture) -> None:
    """An alert pointing at a database ID that resolves to nothing raises not-found."""
    _stub_validate_deps(mocker)
    mocker.patch(
        "superset.commands.report.create.DatabaseDAO.find_by_id",
        return_value=None,
    )
    cmd = CreateReportScheduleCommand({})
    cmd._properties = {
        "type": ReportScheduleType.ALERT,
        "name": "Test Alert",
        "crontab": "* * * * *",
        "creation_method": "alerts_reports",
        "database": 999,
    }
    with pytest.raises(ReportScheduleInvalidError) as exc_info:
        cmd.validate()
    assert any(
        isinstance(err, DatabaseNotFoundValidationError)
        for err in exc_info.value._exceptions
    )

View File

@@ -16,23 +16,42 @@
# under the License.
import json # noqa: TID251
from datetime import datetime
from datetime import datetime, timedelta
from unittest.mock import patch
from uuid import UUID
from uuid import UUID, uuid4
import pytest
from pytest_mock import MockerFixture
from superset.app import SupersetApp
from superset.commands.exceptions import UpdateFailedError
from superset.commands.report.execute import BaseReportState
from superset.commands.report.exceptions import (
ReportScheduleAlertGracePeriodError,
ReportScheduleCsvFailedError,
ReportSchedulePreviousWorkingError,
ReportScheduleScreenshotFailedError,
ReportScheduleScreenshotTimeout,
ReportScheduleStateNotFoundError,
ReportScheduleUnexpectedError,
ReportScheduleWorkingTimeoutError,
)
from superset.commands.report.execute import (
BaseReportState,
ReportNotTriggeredErrorState,
ReportScheduleStateMachine,
ReportSuccessState,
ReportWorkingState,
)
from superset.daos.report import REPORT_SCHEDULE_ERROR_NOTIFICATION_MARKER
from superset.dashboards.permalink.types import DashboardPermalinkState
from superset.reports.models import (
ReportDataFormat,
ReportRecipients,
ReportRecipientType,
ReportSchedule,
ReportScheduleType,
ReportSourceFormat,
ReportState,
)
from superset.utils.core import HeaderDataType
from superset.utils.screenshots import ChartScreenshot
@@ -357,6 +376,227 @@ def test_get_dashboard_urls_with_exporting_dashboard_only(
assert expected_url == result[0]
@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
@with_feature_flags(ALERT_REPORT_TABS=True)
def test_get_dashboard_urls_with_filters_and_tabs(
    mock_permalink_cls,
    mocker: MockerFixture,
    app,
) -> None:
    """With active tabs plus native filters, one permalink URL is built per
    tab and every permalink state carries the rison-encoded native_filters
    url param."""
    mock_report_schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
    mock_report_schedule.chart = False
    mock_report_schedule.chart_id = None
    mock_report_schedule.dashboard_id = 123
    mock_report_schedule.type = "report_type"
    mock_report_schedule.report_format = "report_format"
    mock_report_schedule.owners = [1, 2]
    mock_report_schedule.recipients = []
    native_filter_rison = "(NATIVE_FILTER-1:(filterType:filter_select))"
    mock_report_schedule.extra = {
        "dashboard": {
            "anchor": json.dumps(["TAB-1", "TAB-2"]),
            "dataMask": {"NATIVE_FILTER-1": {"filterState": {"value": ["Sales"]}}},
            "activeTabs": ["TAB-1", "TAB-2"],
            "urlParams": None,
            "nativeFilters": [  # type: ignore[typeddict-unknown-key]
                {
                    "nativeFilterId": "NATIVE_FILTER-1",
                    "filterType": "filter_select",
                    "columnName": "department",
                    "filterValues": ["Sales"],
                }
            ],
        }
    }
    mock_report_schedule.get_native_filters_params.return_value = (  # type: ignore[attr-defined]
        native_filter_rison,
        [],
    )
    # Two tabs -> two permalink commands, each returning its own key.
    mock_permalink_cls.return_value.run.side_effect = ["key1", "key2"]
    class_instance: BaseReportState = BaseReportState(
        mock_report_schedule, "January 1, 2021", "execution_id_example"
    )
    class_instance._report_schedule = mock_report_schedule
    result: list[str] = class_instance.get_dashboard_urls()
    import urllib.parse

    base_url = app.config.get("WEBDRIVER_BASEURL", "http://0.0.0.0:8080/")
    assert result == [
        urllib.parse.urljoin(base_url, "superset/dashboard/p/key1/"),
        urllib.parse.urljoin(base_url, "superset/dashboard/p/key2/"),
    ]
    mock_report_schedule.get_native_filters_params.assert_called_once()  # type: ignore[attr-defined]
    assert mock_permalink_cls.call_count == 2
    # Every permalink state must include the merged native_filters url param.
    for call in mock_permalink_cls.call_args_list:
        state = call.kwargs["state"]
        assert state["urlParams"] == [["native_filters", native_filter_rison]]
    # Each permalink is anchored on its own tab.
    assert mock_permalink_cls.call_args_list[0].kwargs["state"]["anchor"] == "TAB-1"
    assert mock_permalink_cls.call_args_list[1].kwargs["state"]["anchor"] == "TAB-2"
@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
@with_feature_flags(ALERT_REPORT_TABS=True)
def test_get_dashboard_urls_with_filters_no_tabs(
    mock_permalink_cls,
    mocker: MockerFixture,
    app,
) -> None:
    """With native filters but no active tabs, a single permalink URL is
    built and its state carries the rison-encoded native_filters param."""
    mock_report_schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
    mock_report_schedule.chart = False
    mock_report_schedule.chart_id = None
    mock_report_schedule.dashboard_id = 123
    mock_report_schedule.type = "report_type"
    mock_report_schedule.report_format = "report_format"
    mock_report_schedule.owners = [1, 2]
    mock_report_schedule.recipients = []
    native_filter_rison = "(NATIVE_FILTER-1:(filterType:filter_select))"
    mock_report_schedule.extra = {
        "dashboard": {
            "anchor": "",
            "dataMask": {"NATIVE_FILTER-1": {"filterState": {"value": ["Sales"]}}},
            "activeTabs": None,
            "urlParams": None,
            "nativeFilters": [  # type: ignore[typeddict-unknown-key]
                {
                    "nativeFilterId": "NATIVE_FILTER-1",
                    "filterType": "filter_select",
                    "columnName": "department",
                    "filterValues": ["Sales"],
                }
            ],
        }
    }
    mock_report_schedule.get_native_filters_params.return_value = (  # type: ignore[attr-defined]
        native_filter_rison,
        [],
    )
    mock_permalink_cls.return_value.run.return_value = "key1"
    class_instance: BaseReportState = BaseReportState(
        mock_report_schedule, "January 1, 2021", "execution_id_example"
    )
    class_instance._report_schedule = mock_report_schedule
    result: list[str] = class_instance.get_dashboard_urls()
    import urllib.parse

    base_url = app.config.get("WEBDRIVER_BASEURL", "http://0.0.0.0:8080/")
    # No tabs -> exactly one permalink URL.
    assert result == [
        urllib.parse.urljoin(base_url, "superset/dashboard/p/key1/"),
    ]
    mock_report_schedule.get_native_filters_params.assert_called_once()  # type: ignore[attr-defined]
    assert mock_permalink_cls.call_count == 1
    state = mock_permalink_cls.call_args_list[0].kwargs["state"]
    assert state["urlParams"] == [["native_filters", native_filter_rison]]
@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
@with_feature_flags(ALERT_REPORT_TABS=True)
def test_get_dashboard_urls_preserves_existing_url_params(
    mock_permalink_cls,
    mocker: MockerFixture,
    app,
) -> None:
    """Existing urlParams (e.g. standalone) must survive native_filters merge."""
    mock_report_schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
    mock_report_schedule.chart = False
    mock_report_schedule.chart_id = None
    mock_report_schedule.dashboard_id = 123
    mock_report_schedule.type = "report_type"
    mock_report_schedule.report_format = "report_format"
    mock_report_schedule.owners = [1, 2]
    mock_report_schedule.recipients = []
    native_filter_rison = "(NATIVE_FILTER-1:(filterType:filter_select))"
    mock_report_schedule.extra = {
        "dashboard": {
            "anchor": "",
            "dataMask": {},
            "activeTabs": None,
            # Pre-existing params that the merge must keep, in order.
            "urlParams": [("standalone", "true"), ("show_filters", "0")],
            "nativeFilters": [  # type: ignore[typeddict-unknown-key]
                {
                    "nativeFilterId": "NATIVE_FILTER-1",
                    "filterType": "filter_select",
                    "columnName": "dept",
                    "filterValues": ["Sales"],
                }
            ],
        }
    }
    mock_report_schedule.get_native_filters_params.return_value = (  # type: ignore[attr-defined]
        native_filter_rison,
        [],
    )
    mock_permalink_cls.return_value.run.return_value = "key1"
    class_instance: BaseReportState = BaseReportState(
        mock_report_schedule, "January 1, 2021", "execution_id_example"
    )
    class_instance._report_schedule = mock_report_schedule
    class_instance.get_dashboard_urls()
    state = mock_permalink_cls.call_args_list[0].kwargs["state"]
    # Existing params come first; native_filters is appended last.
    assert state["urlParams"] == [
        ["standalone", "true"],
        ["show_filters", "0"],
        ["native_filters", native_filter_rison],
    ]
@patch("superset.commands.report.execute.CreateDashboardPermalinkCommand")
@with_feature_flags(ALERT_REPORT_TABS=True)
def test_get_dashboard_urls_deduplicates_stale_native_filters(
    mock_permalink_cls,
    mocker: MockerFixture,
    app,
) -> None:
    """A stale native_filters entry in urlParams is replaced, not duplicated."""
    mock_report_schedule: ReportSchedule = mocker.Mock(spec=ReportSchedule)
    mock_report_schedule.chart = False
    mock_report_schedule.chart_id = None
    mock_report_schedule.dashboard_id = 123
    mock_report_schedule.type = "report_type"
    mock_report_schedule.report_format = "report_format"
    mock_report_schedule.owners = [1, 2]
    mock_report_schedule.recipients = []
    native_filter_rison = "(NATIVE_FILTER-1:(new:value))"
    mock_report_schedule.extra = {
        "dashboard": {
            "anchor": "",
            "dataMask": {},
            "activeTabs": None,
            # urlParams already contains an outdated native_filters entry.
            "urlParams": [
                ("standalone", "true"),
                ("native_filters", "(old:stale_value)"),
            ],
            "nativeFilters": [],  # type: ignore[typeddict-unknown-key]
        }
    }
    mock_report_schedule.get_native_filters_params.return_value = (  # type: ignore[attr-defined]
        native_filter_rison,
        [],
    )
    mock_permalink_cls.return_value.run.return_value = "key1"
    class_instance: BaseReportState = BaseReportState(
        mock_report_schedule, "January 1, 2021", "execution_id_example"
    )
    class_instance._report_schedule = mock_report_schedule
    class_instance.get_dashboard_urls()
    state = mock_permalink_cls.call_args_list[0].kwargs["state"]
    # The stale entry is gone; only the fresh rison value remains.
    assert state["urlParams"] == [
        ["standalone", "true"],
        ["native_filters", native_filter_rison],
    ]
@patch(
"superset.commands.dashboard.permalink.create.CreateDashboardPermalinkCommand.run"
)
@@ -596,3 +836,484 @@ def test_update_recipient_to_slack_v2_missing_channels(mocker: MockerFixture):
)
with pytest.raises(UpdateFailedError):
mock_cmmd.update_report_schedule_slack_v2()
# ---------------------------------------------------------------------------
# Tier 1: _update_query_context + create_log
# ---------------------------------------------------------------------------
def test_update_query_context_wraps_screenshot_failure(mocker: MockerFixture) -> None:
    """A screenshot failure in _update_query_context surfaces as CsvFailedError."""
    mock_schedule = mocker.Mock(spec=ReportSchedule)
    report_state = BaseReportState(mock_schedule, datetime.utcnow(), uuid4())
    report_state._report_schedule = mock_schedule
    mocker.patch.object(
        report_state,
        "_get_screenshots",
        side_effect=ReportScheduleScreenshotFailedError("boom"),
    )
    with pytest.raises(ReportScheduleCsvFailedError, match="query context"):
        report_state._update_query_context()
def test_update_query_context_wraps_screenshot_timeout(mocker: MockerFixture) -> None:
    """A screenshot timeout in _update_query_context surfaces as CsvFailedError."""
    mock_schedule = mocker.Mock(spec=ReportSchedule)
    report_state = BaseReportState(mock_schedule, datetime.utcnow(), uuid4())
    report_state._report_schedule = mock_schedule
    mocker.patch.object(
        report_state,
        "_get_screenshots",
        side_effect=ReportScheduleScreenshotTimeout(),
    )
    with pytest.raises(ReportScheduleCsvFailedError, match="query context"):
        report_state._update_query_context()
def test_create_log_stale_data_raises_unexpected_error(mocker: MockerFixture) -> None:
    """A StaleDataError on commit triggers a rollback and UnexpectedError."""
    from sqlalchemy.orm.exc import StaleDataError

    mock_schedule = mocker.Mock(spec=ReportSchedule)
    mock_schedule.last_value = None
    mock_schedule.last_value_row_json = None
    mock_schedule.last_state = ReportState.WORKING
    report_state = BaseReportState(mock_schedule, datetime.utcnow(), uuid4())
    report_state._report_schedule = mock_schedule
    db_mock = mocker.patch("superset.commands.report.execute.db")
    db_mock.session.commit.side_effect = StaleDataError("stale")
    # Keep SQLAlchemy from introspecting the mock when the log row is built.
    mocker.patch(
        "superset.commands.report.execute.ReportExecutionLog",
        return_value=mocker.Mock(),
    )
    with pytest.raises(ReportScheduleUnexpectedError):
        report_state.create_log()
    db_mock.session.rollback.assert_called_once()
# ---------------------------------------------------------------------------
# Tier 2: _get_notification_content branches
# ---------------------------------------------------------------------------
def _make_notification_state(
    mocker: MockerFixture,
    *,
    report_format: ReportDataFormat = ReportDataFormat.PNG,
    schedule_type: ReportScheduleType = ReportScheduleType.REPORT,
    has_chart: bool = True,
    email_subject: str | None = None,
    chart_name: str = "My Chart",
    dashboard_title: str = "My Dashboard",
) -> BaseReportState:
    """Build a BaseReportState with a mock schedule for notification tests.

    Keyword args pick the report format/type, whether the schedule targets a
    chart or a dashboard, and the names used to derive the notification
    subject. ``_get_log_data`` and ``_get_url`` are stubbed so only the
    ``_get_notification_content`` logic runs.
    """
    schedule = mocker.Mock(spec=ReportSchedule)
    schedule.type = schedule_type
    schedule.report_format = report_format
    schedule.name = "Test Schedule"
    schedule.description = "desc"
    schedule.email_subject = email_subject
    schedule.force_screenshot = False
    schedule.recipients = []
    schedule.owners = []
    # A schedule targets exactly one of chart / dashboard; null the other.
    if has_chart:
        schedule.chart = mocker.Mock()
        schedule.chart.slice_name = chart_name
        schedule.dashboard = None
    else:
        schedule.chart = None
        schedule.dashboard = mocker.Mock()
        schedule.dashboard.dashboard_title = dashboard_title
        schedule.dashboard.uuid = "dash-uuid"
        schedule.dashboard.id = 1
    schedule.extra = {}
    state = BaseReportState(schedule, datetime.utcnow(), uuid4())
    state._report_schedule = schedule
    # Stub helpers that _get_notification_content calls
    mocker.patch.object(state, "_get_log_data", return_value={})
    mocker.patch.object(state, "_get_url", return_value="http://example.com")
    return state
@patch("superset.commands.report.execute.feature_flag_manager")
def test_get_notification_content_png_screenshot(
    mock_ff, mocker: MockerFixture
) -> None:
    """The PNG branch attaches every captured screenshot and no text body."""
    mock_ff.is_feature_enabled.return_value = False
    report_state = _make_notification_state(mocker, report_format=ReportDataFormat.PNG)
    mocker.patch.object(
        report_state, "_get_screenshots", return_value=[b"img1", b"img2"]
    )
    notification = report_state._get_notification_content()
    assert notification.screenshots == [b"img1", b"img2"]
    assert notification.text is None
@patch("superset.commands.report.execute.feature_flag_manager")
def test_get_notification_content_png_empty_returns_error(
    mock_ff, mocker: MockerFixture
) -> None:
    """An empty screenshot list falls back to the missing-screenshot text."""
    mock_ff.is_feature_enabled.return_value = False
    report_state = _make_notification_state(mocker, report_format=ReportDataFormat.PNG)
    mocker.patch.object(report_state, "_get_screenshots", return_value=[])
    notification = report_state._get_notification_content()
    assert notification.text == "Unexpected missing screenshot"
@patch("superset.commands.report.execute.feature_flag_manager")
def test_get_notification_content_csv_format(mock_ff, mocker: MockerFixture) -> None:
    """The CSV branch attaches the raw bytes returned by _get_csv_data."""
    mock_ff.is_feature_enabled.return_value = False
    report_state = _make_notification_state(
        mocker, report_format=ReportDataFormat.CSV, has_chart=True
    )
    mocker.patch.object(report_state, "_get_csv_data", return_value=b"col1,col2\n1,2")
    notification = report_state._get_notification_content()
    assert notification.csv == b"col1,col2\n1,2"
@patch("superset.commands.report.execute.feature_flag_manager")
def test_get_notification_content_text_format(mock_ff, mocker: MockerFixture) -> None:
    """The TEXT branch embeds the dataframe returned by _get_embedded_data."""
    import pandas as pd

    mock_ff.is_feature_enabled.return_value = False
    report_state = _make_notification_state(
        mocker, report_format=ReportDataFormat.TEXT, has_chart=True
    )
    frame = pd.DataFrame({"a": [1]})
    mocker.patch.object(report_state, "_get_embedded_data", return_value=frame)
    notification = report_state._get_notification_content()
    assert notification.embedded_data is not None
    assert list(notification.embedded_data.columns) == ["a"]
@pytest.mark.parametrize(
    "email_subject,has_chart,expected_name",
    [
        ("Custom Subject", True, "Custom Subject"),
        (None, True, "Test Schedule: My Chart"),
        (None, False, "Test Schedule: My Dashboard"),
    ],
    ids=["email_subject", "chart_name", "dashboard_name"],
)
@patch("superset.commands.report.execute.feature_flag_manager")
def test_get_notification_content_name(
    mock_ff,
    mocker: MockerFixture,
    email_subject: str | None,
    has_chart: bool,
    expected_name: str,
) -> None:
    """The notification name falls back from email_subject to chart/dashboard."""
    mock_ff.is_feature_enabled.return_value = False
    report_state = _make_notification_state(
        mocker,
        report_format=ReportDataFormat.PNG,
        email_subject=email_subject,
        has_chart=has_chart,
    )
    mocker.patch.object(report_state, "_get_screenshots", return_value=[b"img"])
    notification = report_state._get_notification_content()
    assert notification.name == expected_name
# ---------------------------------------------------------------------------
# Tier 3: State machine top-level branches
# ---------------------------------------------------------------------------
def _make_state_instance(
    mocker: MockerFixture,
    cls: type,
    *,
    schedule_type: ReportScheduleType = ReportScheduleType.ALERT,
    last_state: ReportState = ReportState.NOOP,
    grace_period: int = 3600,
    working_timeout: int = 3600,
) -> BaseReportState:
    """Create a state-machine state instance with a mocked schedule.

    ``cls`` is the concrete state class to instantiate (e.g.
    ReportWorkingState); the keyword args shape the mock schedule that
    drives its ``next()`` transition.
    """
    schedule = mocker.Mock(spec=ReportSchedule)
    schedule.type = schedule_type
    schedule.last_state = last_state
    schedule.grace_period = grace_period
    schedule.working_timeout = working_timeout
    schedule.last_eval_dttm = datetime.utcnow()
    schedule.name = "Test"
    schedule.owners = []
    schedule.recipients = []
    schedule.force_screenshot = False
    schedule.extra = {}
    instance = cls(schedule, datetime.utcnow(), uuid4())
    instance._report_schedule = schedule
    return instance
def test_working_state_timeout_raises_timeout_error(mocker: MockerFixture) -> None:
    """A timed-out WORKING report is logged as ERROR and raises TimeoutError."""
    report_state = _make_state_instance(mocker, ReportWorkingState)
    mocker.patch.object(report_state, "is_on_working_timeout", return_value=True)
    working_log = mocker.Mock()
    working_log.end_dttm = datetime.utcnow() - timedelta(hours=2)
    mocker.patch(
        "superset.commands.report.execute.ReportScheduleDAO.find_last_entered_working_log",
        return_value=working_log,
    )
    mocker.patch.object(report_state, "update_report_schedule_and_log")
    with pytest.raises(ReportScheduleWorkingTimeoutError):
        report_state.next()
    report_state.update_report_schedule_and_log.assert_called_once_with(  # type: ignore[attr-defined]
        ReportState.ERROR,
        error_message=str(ReportScheduleWorkingTimeoutError()),
    )
def test_working_state_still_working_raises_previous_working(
    mocker: MockerFixture,
) -> None:
    """A WORKING report inside the timeout window raises PreviousWorkingError."""
    report_state = _make_state_instance(mocker, ReportWorkingState)
    mocker.patch.object(report_state, "is_on_working_timeout", return_value=False)
    mocker.patch.object(report_state, "update_report_schedule_and_log")
    with pytest.raises(ReportSchedulePreviousWorkingError):
        report_state.next()
    report_state.update_report_schedule_and_log.assert_called_once_with(  # type: ignore[attr-defined]
        ReportState.WORKING,
        error_message=str(ReportSchedulePreviousWorkingError()),
    )
def test_success_state_grace_period_returns_without_sending(
    mocker: MockerFixture,
) -> None:
    """An alert still in its grace period records GRACE and sends nothing."""
    report_state = _make_state_instance(
        mocker,
        ReportSuccessState,
        schedule_type=ReportScheduleType.ALERT,
    )
    mocker.patch.object(report_state, "is_in_grace_period", return_value=True)
    mocker.patch.object(report_state, "update_report_schedule_and_log")
    send_stub = mocker.patch.object(report_state, "send")
    report_state.next()
    send_stub.assert_not_called()
    report_state.update_report_schedule_and_log.assert_called_once_with(  # type: ignore[attr-defined]
        ReportState.GRACE,
        error_message=str(ReportScheduleAlertGracePeriodError()),
    )
def test_not_triggered_error_state_send_failure_logs_error_and_reraises(
    mocker: MockerFixture,
) -> None:
    """A send() failure in the NOOP/ERROR state is logged as ERROR and re-raised."""
    report_state = _make_state_instance(
        mocker,
        ReportNotTriggeredErrorState,
        schedule_type=ReportScheduleType.REPORT,
    )
    failure = RuntimeError("send failed")
    mocker.patch.object(report_state, "send", side_effect=failure)
    mocker.patch.object(report_state, "update_report_schedule_and_log")
    mocker.patch.object(report_state, "is_in_error_grace_period", return_value=True)
    with pytest.raises(RuntimeError, match="send failed"):
        report_state.next()
    log_calls = report_state.update_report_schedule_and_log.call_args_list  # type: ignore[attr-defined]
    # The WORKING transition is logged first, then the ERROR transition.
    assert log_calls[0].args[0] == ReportState.WORKING
    assert log_calls[1].args[0] == ReportState.ERROR
    error_msg = log_calls[1].kwargs.get("error_message") or (
        log_calls[1].args[1] if len(log_calls[1].args) > 1 else ""
    )
    assert "send failed" in error_msg
# ---------------------------------------------------------------------------
# Phase 1 remaining gaps
# ---------------------------------------------------------------------------
def test_get_dashboard_urls_no_state_fallback(
    mocker: MockerFixture, app: SupersetApp
) -> None:
    """No dashboard state in extra -> standard dashboard URL, not permalink."""
    mock_schedule = mocker.Mock(spec=ReportSchedule)
    mock_schedule.chart = False
    mock_schedule.force_screenshot = False
    mock_schedule.extra = {}  # no dashboard state recorded
    mock_schedule.dashboard = mocker.Mock()
    mock_schedule.dashboard.uuid = "dash-uuid-123"
    mock_schedule.dashboard.id = 42
    mock_schedule.recipients = []
    report_state = BaseReportState(mock_schedule, "Jan 1", "exec_id")
    report_state._report_schedule = mock_schedule
    urls = report_state.get_dashboard_urls()
    assert len(urls) == 1
    assert "superset/dashboard/" in urls[0]
    assert "dashboard/p/" not in urls[0]  # must not be a permalink
def test_success_state_alert_command_error_sends_error_and_reraises(
    mocker: MockerFixture,
) -> None:
    """A failing AlertCommand triggers send_error + ERROR state with the marker."""
    report_state = _make_state_instance(
        mocker, ReportSuccessState, schedule_type=ReportScheduleType.ALERT
    )
    mocker.patch.object(report_state, "is_in_grace_period", return_value=False)
    mocker.patch.object(report_state, "update_report_schedule_and_log")
    mocker.patch.object(report_state, "send_error")
    alert_cls = mocker.patch("superset.commands.report.execute.AlertCommand")
    alert_cls.return_value.run.side_effect = RuntimeError("alert boom")
    with pytest.raises(RuntimeError, match="alert boom"):
        report_state.next()
    report_state.send_error.assert_called_once()  # type: ignore[attr-defined]
    log_calls = report_state.update_report_schedule_and_log.call_args_list  # type: ignore[attr-defined]
    # WORKING is logged first, then ERROR carrying the notification marker.
    assert log_calls[0].args[0] == ReportState.WORKING
    assert log_calls[1].args[0] == ReportState.ERROR
    assert (
        log_calls[1].kwargs.get("error_message")
        == REPORT_SCHEDULE_ERROR_NOTIFICATION_MARKER
    )
def test_success_state_send_error_logs_and_reraises(
    mocker: MockerFixture,
) -> None:
    """A REPORT whose send() blows up ends in the ERROR state and re-raises."""
    report_state = _make_state_instance(
        mocker, ReportSuccessState, schedule_type=ReportScheduleType.REPORT
    )
    mocker.patch.object(report_state, "send", side_effect=RuntimeError("send boom"))
    mocker.patch.object(report_state, "update_report_schedule_and_log")
    with pytest.raises(RuntimeError, match="send boom"):
        report_state.next()
    log_calls = report_state.update_report_schedule_and_log.call_args_list  # type: ignore[attr-defined]
    assert log_calls[-1].args[0] == ReportState.ERROR
@patch("superset.commands.report.execute.feature_flag_manager")
def test_get_notification_content_pdf_format(mock_ff, mocker: MockerFixture) -> None:
    """The PDF branch attaches the generated PDF bytes and no text body."""
    mock_ff.is_feature_enabled.return_value = False
    report_state = _make_notification_state(mocker, report_format=ReportDataFormat.PDF)
    mocker.patch.object(report_state, "_get_pdf", return_value=b"%PDF-fake")
    notification = report_state._get_notification_content()
    assert notification.pdf == b"%PDF-fake"
    assert notification.text is None
# ---------------------------------------------------------------------------
# Phase 1 gap closure: state machine, feature flag, create_log, success path
# ---------------------------------------------------------------------------
def test_state_machine_unknown_state_raises_not_found(
    mocker: MockerFixture,
) -> None:
    """An unrecognized last_state makes the state machine raise NotFoundError."""
    mock_schedule = mocker.Mock(spec=ReportSchedule)
    # This string is deliberately absent from every state class's current_states.
    mock_schedule.last_state = "NONEXISTENT_STATE"
    machine = ReportScheduleStateMachine(uuid4(), mock_schedule, datetime.utcnow())
    with pytest.raises(ReportScheduleStateNotFoundError):
        machine.run()
@patch("superset.commands.report.execute.feature_flag_manager")
def test_get_notification_content_alert_no_flag_skips_attachment(
    mock_ff, mocker: MockerFixture
) -> None:
    """With ALERTS_ATTACH_REPORTS off, alerts skip screenshot/pdf/csv entirely."""
    mock_ff.is_feature_enabled.return_value = False
    report_state = _make_notification_state(
        mocker,
        report_format=ReportDataFormat.PNG,
        schedule_type=ReportScheduleType.ALERT,
        has_chart=True,
    )
    screenshot_stub = mocker.patch.object(report_state, "_get_screenshots")
    notification = report_state._get_notification_content()
    # The whole attachment block is skipped, so no screenshot is ever taken.
    screenshot_stub.assert_not_called()
    assert notification.screenshots == []
    assert notification.text is None
def test_create_log_success_commits(mocker: MockerFixture) -> None:
    """A successful create_log adds one log row and commits the session."""
    mock_schedule = mocker.Mock(spec=ReportSchedule)
    mock_schedule.last_value = "42"
    mock_schedule.last_value_row_json = '{"col": 42}'
    mock_schedule.last_state = ReportState.SUCCESS
    report_state = BaseReportState(mock_schedule, datetime.utcnow(), uuid4())
    report_state._report_schedule = mock_schedule
    db_mock = mocker.patch("superset.commands.report.execute.db")
    log_cls = mocker.patch(
        "superset.commands.report.execute.ReportExecutionLog",
        return_value=mocker.Mock(),
    )
    report_state.create_log(error_message=None)
    log_cls.assert_called_once()
    db_mock.session.add.assert_called_once()
    db_mock.session.commit.assert_called_once()
    db_mock.session.rollback.assert_not_called()
def test_success_state_report_sends_and_logs_success(
    mocker: MockerFixture,
) -> None:
    """A REPORT in the success state sends and transitions to SUCCESS."""
    report_state = _make_state_instance(
        mocker,
        ReportSuccessState,
        schedule_type=ReportScheduleType.REPORT,
    )
    send_stub = mocker.patch.object(report_state, "send")
    mocker.patch.object(report_state, "update_report_schedule_and_log")
    report_state.next()
    send_stub.assert_called_once()
    report_state.update_report_schedule_and_log.assert_called_once_with(  # type: ignore[attr-defined]
        ReportState.SUCCESS,
        error_message=None,
    )

View File

@@ -24,10 +24,13 @@ import pytest
from pytest_mock import MockerFixture
from superset.commands.report.exceptions import (
ReportScheduleForbiddenError,
ReportScheduleInvalidError,
)
from superset.commands.report.update import UpdateReportScheduleCommand
from superset.reports.models import ReportScheduleType
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import SupersetSecurityException
from superset.reports.models import ReportScheduleType, ReportState
def _make_model(
@@ -252,3 +255,71 @@ def test_report_to_alert_with_db_accepted(mocker: MockerFixture) -> None:
data={"type": ReportScheduleType.ALERT, "database": 5},
)
cmd.validate() # should not raise
# --- Deactivation state reset ---
def test_deactivation_resets_working_state_to_noop(mocker: MockerFixture) -> None:
    """Deactivating a report stuck in WORKING must reset last_state to NOOP."""
    report_model = _make_model(
        mocker, model_type=ReportScheduleType.REPORT, database_id=None
    )
    report_model.last_state = ReportState.WORKING
    _setup_mocks(mocker, report_model)
    command = UpdateReportScheduleCommand(model_id=1, data={"active": False})
    command.validate()
    assert command._properties["last_state"] == ReportState.NOOP
def test_deactivation_from_non_working_does_not_reset(mocker: MockerFixture) -> None:
    """Deactivating a report that is not WORKING leaves last_state untouched."""
    report_model = _make_model(
        mocker, model_type=ReportScheduleType.REPORT, database_id=None
    )
    report_model.last_state = ReportState.SUCCESS
    _setup_mocks(mocker, report_model)
    command = UpdateReportScheduleCommand(model_id=1, data={"active": False})
    command.validate()
    assert "last_state" not in command._properties
# --- Ownership check ---
def test_ownership_check_raises_forbidden(mocker: MockerFixture) -> None:
    """A non-owner attempting an update gets ReportScheduleForbiddenError."""
    report_model = _make_model(
        mocker, model_type=ReportScheduleType.REPORT, database_id=None
    )
    _setup_mocks(mocker, report_model)
    # Have the ownership check blow up the way security_manager would.
    security_error = SupersetSecurityException(
        SupersetError(
            message="Forbidden",
            error_type=SupersetErrorType.GENERIC_BACKEND_ERROR,
            level=ErrorLevel.ERROR,
        )
    )
    mocker.patch(
        "superset.commands.report.update.security_manager.raise_for_ownership",
        side_effect=security_error,
    )
    command = UpdateReportScheduleCommand(model_id=1, data={})
    with pytest.raises(ReportScheduleForbiddenError):
        command.validate()
# --- Database not found for alert ---
def test_alert_with_nonexistent_database_rejected(mocker: MockerFixture) -> None:
    """Pointing an alert at a missing database ID must fail validation."""
    report_model = _make_model(
        mocker, model_type=ReportScheduleType.ALERT, database_id=None
    )
    _setup_mocks(mocker, report_model)
    # DAO lookup finds nothing for the requested ID.
    mocker.patch(
        "superset.commands.report.update.DatabaseDAO.find_by_id",
        return_value=None,
    )
    command = UpdateReportScheduleCommand(model_id=1, data={"database": 99999})
    with pytest.raises(ReportScheduleInvalidError) as exc_info:
        command.validate()
    errors = _get_validation_messages(exc_info)
    assert "database" in errors
    assert "does not exist" in errors["database"].lower()

View File

@@ -0,0 +1,52 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any
from unittest.mock import patch
import prison
from superset.exceptions import SupersetException
from tests.unit_tests.conftest import with_feature_flags
@with_feature_flags(ALERT_REPORTS=True)
@patch("superset.reports.api.get_channels_with_search")
def test_slack_channels_success(
    mock_search: Any,
    client: Any,
    full_api_access: None,
) -> None:
    """The slack_channels endpoint returns the channel list on success."""
    channels = [{"id": "C123", "name": "general"}]
    mock_search.return_value = channels
    response = client.get(f"/api/v1/report/slack_channels/?q={prison.dumps({})}")
    assert response.status_code == 200
    assert response.json["result"] == channels
@with_feature_flags(ALERT_REPORTS=True)
@patch("superset.reports.api.get_channels_with_search")
def test_slack_channels_handles_superset_exception(
    mock_search: Any,
    client: Any,
    full_api_access: None,
) -> None:
    """A SupersetException from the Slack client maps to a 422 response."""
    mock_search.side_effect = SupersetException("Slack API error")
    response = client.get(f"/api/v1/report/slack_channels/?q={prison.dumps({})}")
    assert response.status_code == 422
    assert "Slack API error" in response.json["message"]

View File

@@ -0,0 +1,91 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest.mock import MagicMock, patch
@patch("superset.daos.report.get_user_id", return_value=1)
@patch("superset.daos.report.db")
def test_validate_unique_creation_method_duplicate_returns_false(
    mock_db: MagicMock,
    mock_uid: MagicMock,
) -> None:
    """An existing matching report makes the uniqueness check return False."""
    from superset.daos.report import ReportScheduleDAO

    # Simulate that a matching report already exists in the session.
    session = mock_db.session
    session.query.return_value.filter_by.return_value.filter.return_value = (
        MagicMock()
    )
    session.query.return_value.scalar.return_value = True
    assert ReportScheduleDAO.validate_unique_creation_method(dashboard_id=1) is False
@patch("superset.daos.report.get_user_id", return_value=1)
@patch("superset.daos.report.db")
def test_validate_unique_creation_method_no_duplicate_returns_true(
    mock_db: MagicMock,
    mock_uid: MagicMock,
) -> None:
    """With no matching report in the session the uniqueness check passes."""
    from superset.daos.report import ReportScheduleDAO

    session = mock_db.session
    session.query.return_value.filter_by.return_value.filter.return_value = (
        MagicMock()
    )
    session.query.return_value.scalar.return_value = False
    assert ReportScheduleDAO.validate_unique_creation_method(dashboard_id=1) is True
@patch("superset.daos.report.db")
def test_find_last_error_notification_returns_none_after_success(
    mock_db: MagicMock,
) -> None:
    """A success logged after the last error email means no re-notification."""
    from superset.daos.report import ReportScheduleDAO
    schedule = MagicMock()
    error_log = MagicMock()
    success_log = MagicMock()
    # Build the query chain so each .query().filter().order_by().first() call
    # returns a different result. The DAO calls db.session.query() twice:
    #   1st call finds the error marker log
    #   2nd call finds a non-error log after it (success happened since last error email)
    query_mock = MagicMock()
    mock_db.session.query.return_value = query_mock
    chain = query_mock.filter.return_value.order_by.return_value
    chain.first.side_effect = [error_log, success_log]
    result = ReportScheduleDAO.find_last_error_notification(schedule)
    # Success log exists after error → should return None (no re-notification needed)
    assert result is None
@patch("superset.daos.report.db")
def test_find_last_error_notification_returns_log_when_only_errors(
    mock_db: MagicMock,
) -> None:
    """With no success since the last error email, the error log is returned."""
    from superset.daos.report import ReportScheduleDAO
    schedule = MagicMock()
    error_log = MagicMock()
    query_mock = MagicMock()
    mock_db.session.query.return_value = query_mock
    # Both DAO queries share the same filter().order_by().first() chain.
    chain = query_mock.filter.return_value.order_by.return_value
    # 1st call: error marker log found; 2nd call: no success log after it
    chain.first.side_effect = [error_log, None]
    result = ReportScheduleDAO.find_last_error_notification(schedule)
    assert result is error_log

View File

@@ -0,0 +1,64 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest.mock import MagicMock, patch
@patch("superset.reports.filters.security_manager", new_callable=MagicMock)
def test_report_schedule_filter_admin_sees_all(mock_sm: MagicMock) -> None:
    """Users with all-datasource access get the query back unfiltered."""
    from superset.reports.filters import ReportScheduleFilter

    mock_sm.can_access_all_datasources.return_value = True
    mock_query = MagicMock()
    schedule_filter = ReportScheduleFilter("id", MagicMock())
    assert schedule_filter.apply(mock_query, None) is mock_query
    mock_query.filter.assert_not_called()
@patch("superset.reports.filters.security_manager", new_callable=MagicMock)
@patch("superset.reports.filters.db")
def test_report_schedule_filter_non_admin_filtered(
    mock_db: MagicMock, mock_sm: MagicMock
) -> None:
    """Users without global datasource access get an ownership filter applied."""
    from superset.reports.filters import ReportScheduleFilter

    mock_sm.can_access_all_datasources.return_value = False
    mock_sm.user_model.get_user_id.return_value = 1
    mock_sm.user_model.id = 1
    mock_query = MagicMock()
    schedule_filter = ReportScheduleFilter("id", MagicMock())
    schedule_filter.apply(mock_query, None)
    mock_query.filter.assert_called_once()
def test_report_schedule_all_text_filter_empty_noop() -> None:
    """An empty search string leaves the query unmodified."""
    from superset.reports.filters import ReportScheduleAllTextFilter

    mock_query = MagicMock()
    text_filter = ReportScheduleAllTextFilter("name", MagicMock())
    assert text_filter.apply(mock_query, "") is mock_query
    mock_query.filter.assert_not_called()
def test_report_schedule_all_text_filter_applies_ilike() -> None:
    """A non-empty search string adds exactly one filter clause."""
    from superset.reports.filters import ReportScheduleAllTextFilter

    mock_query = MagicMock()
    text_filter = ReportScheduleAllTextFilter("name", MagicMock())
    text_filter.apply(mock_query, "test")
    mock_query.filter.assert_called_once()

View File

@@ -323,6 +323,124 @@ def test_report_generate_native_filter_no_column_name():
assert warning is None
def test_report_generate_native_filter_select_null_column():
    """A None column name on filter_select falls back to empty strings."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F1", "filter_select", None, ["US"]
    )
    assert warning is None
    payload = result["F1"]
    assert payload["extraFormData"]["filters"][0]["col"] == ""
    assert payload["filterState"]["label"] == ""
def test_generate_native_filter_time_normal():
    """filter_time maps its value onto extraFormData.time_range."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F2", "filter_time", "ignored", ["Last week"]
    )
    expected = {
        "F2": {
            "id": "F2",
            "extraFormData": {"time_range": "Last week"},
            "filterState": {"value": "Last week"},
            "ownState": {},
        }
    }
    assert warning is None
    assert result == expected
def test_generate_native_filter_timegrain_normal():
    """filter_timegrain maps its value onto extraFormData.time_grain_sqla."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F3", "filter_timegrain", "ignored", ["P1D"]
    )
    expected = {
        "F3": {
            "id": "F3",
            "extraFormData": {"time_grain_sqla": "P1D"},
            "filterState": {"value": ["P1D"]},
            "ownState": {},
        }
    }
    assert warning is None
    assert result == expected
def test_generate_native_filter_timecolumn_normal():
    """filter_timecolumn is the only branch whose payload omits 'id'."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F4", "filter_timecolumn", "ignored", ["ds"]
    )
    expected = {
        "F4": {
            "extraFormData": {"granularity_sqla": "ds"},
            "filterState": {"value": ["ds"]},
        }
    }
    assert warning is None
    assert result == expected
    assert "id" not in result["F4"]
def test_generate_native_filter_range_normal():
    """A two-sided range yields >=/<= filters and a two-sided label."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", "price", [10, 100]
    )
    expected = {
        "F5": {
            "id": "F5",
            "extraFormData": {
                "filters": [
                    {"col": "price", "op": ">=", "val": 10},
                    {"col": "price", "op": "<=", "val": 100},
                ]
            },
            "filterState": {
                "value": [10, 100],
                "label": "10 ≤ x ≤ 100",
            },
            "ownState": {},
        }
    }
    assert warning is None
    assert result == expected
def test_generate_native_filter_range_min_only():
    """A min-only range emits a single >= filter and an 'x ≥ min' label."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", "price", [10]
    )
    assert warning is None
    payload = result["F5"]
    assert payload["extraFormData"]["filters"] == [
        {"col": "price", "op": ">=", "val": 10}
    ]
    assert payload["filterState"]["label"] == "x ≥ 10"
    # The stored value is padded to both positions.
    assert payload["filterState"]["value"] == [10, None]
def test_generate_native_filter_range_max_only():
    """A max-only range emits a single <= filter and an 'x ≤ max' label."""
    report_schedule = ReportSchedule()
    result, warning = report_schedule._generate_native_filter(
        "F5", "filter_range", "price", [None, 100]
    )
    assert result["F5"]["extraFormData"]["filters"] == [
        {"col": "price", "op": "<=", "val": 100}
    ]
    assert result["F5"]["filterState"]["label"] == "x ≤ 100"
    # Parity with the min-only test: the stored value keeps both positions.
    assert result["F5"]["filterState"]["value"] == [None, 100]
    assert warning is None
def test_generate_native_filter_range_empty_values():
    """An empty value list yields no filters, a blank label, and null bounds."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", "price", []
    )
    assert warning is None
    payload = result["F5"]
    assert payload["extraFormData"]["filters"] == []
    assert payload["filterState"]["label"] == ""
    assert payload["filterState"]["value"] == [None, None]
def test_report_generate_native_filter_unknown_filter_type():
"""
Test the ``_generate_native_filter`` method with an unknown filter type.
@@ -344,6 +462,34 @@ def test_report_generate_native_filter_unknown_filter_type():
assert "filter_id" in warning
def test_get_native_filters_params_null_native_filters():
    """A null nativeFilters entry produces an empty rison result, no warnings."""
    schedule = ReportSchedule()
    schedule.extra = {"dashboard": {"nativeFilters": None}}
    params, warnings = schedule.get_native_filters_params()
    assert warnings == []
    assert params == "()"
def test_get_native_filters_params_rison_quote_escaping():
    """Single quotes inside filter values are percent-escaped in the output."""
    schedule = ReportSchedule()
    native_filter = {
        "nativeFilterId": "F1",
        "filterType": "filter_select",
        "columnName": "name",
        "filterValues": ["O'Brien"],
    }
    schedule.extra = {"dashboard": {"nativeFilters": [native_filter]}}
    params, warnings = schedule.get_native_filters_params()
    assert warnings == []
    # The raw quote must not survive; it is encoded as %27.
    assert "'" not in params
    assert "%27" in params
def test_get_native_filters_params_unknown_filter_type():
"""
Test the ``get_native_filters_params`` method with an unknown filter type.
@@ -378,3 +524,135 @@ def test_get_native_filters_params_unknown_filter_type():
assert len(warnings) == 1
assert "unrecognized filter type" in warnings[0]
assert "filter_unknown_type" in warnings[0]
def test_get_native_filters_params_missing_filter_id_key():
    """A filter entry without nativeFilterId is skipped with a warning."""
    schedule = ReportSchedule()
    malformed_filter = {
        # "nativeFilterId" deliberately absent — the defensive guard skips it.
        "filterType": "filter_select",
        "columnName": "col",
        "filterValues": ["v"],
    }
    schedule.extra = {"dashboard": {"nativeFilters": [malformed_filter]}}
    params, warnings = schedule.get_native_filters_params()
    assert params == "()"
    assert len(warnings) == 1
    assert "Skipping malformed native filter" in warnings[0]
def test_generate_native_filter_empty_filter_id():
    """An empty filter ID exercises the ``or ""`` fallback branches."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "", "filter_select", "col", ["x"]
    )
    assert warning is None
    assert "" in result
    assert result[""]["id"] == ""
def test_generate_native_filter_range_zero_min():
    """Zero min_val should produce a two-sided label, not a max-only label."""
    report_schedule = ReportSchedule()
    result, warning = report_schedule._generate_native_filter(
        "F5", "filter_range", "price", [0, 100]
    )
    assert result["F5"]["extraFormData"]["filters"] == [
        {"col": "price", "op": ">=", "val": 0},
        {"col": "price", "op": "<=", "val": 100},
    ]
    # Label generation correctly treats zero as a valid bound
    assert result["F5"]["filterState"]["label"] == "0 ≤ x ≤ 100"
    # Parity with the other range tests: a valid range emits no warning.
    assert warning is None
def test_generate_native_filter_range_zero_max():
    """Zero max_val should produce a two-sided label, not a min-only label."""
    report_schedule = ReportSchedule()
    result, warning = report_schedule._generate_native_filter(
        "F5", "filter_range", "price", [10, 0]
    )
    assert result["F5"]["extraFormData"]["filters"] == [
        {"col": "price", "op": ">=", "val": 10},
        {"col": "price", "op": "<=", "val": 0},
    ]
    assert result["F5"]["filterState"]["label"] == "10 ≤ x ≤ 0"
    # Parity with the other range tests: a valid range emits no warning.
    assert warning is None
def test_generate_native_filter_range_both_zero():
    """Both values zero should produce a two-sided label, not an empty string."""
    report_schedule = ReportSchedule()
    result, warning = report_schedule._generate_native_filter(
        "F5", "filter_range", "price", [0, 0]
    )
    assert result["F5"]["extraFormData"]["filters"] == [
        {"col": "price", "op": ">=", "val": 0},
        {"col": "price", "op": "<=", "val": 0},
    ]
    assert result["F5"]["filterState"]["label"] == "0 ≤ x ≤ 0"
    # Parity with the other range tests: a valid range emits no warning.
    assert warning is None
def test_get_native_filters_params_missing_filter_type():
    """A filter entry without filterType is skipped with a warning."""
    schedule = ReportSchedule()
    malformed_filter = {
        "nativeFilterId": "F1",
        "columnName": "col",
        "filterValues": ["v"],
    }
    schedule.extra = {"dashboard": {"nativeFilters": [malformed_filter]}}
    params, warnings = schedule.get_native_filters_params()
    assert params == "()"
    assert len(warnings) == 1
    assert "Skipping malformed native filter" in warnings[0]
def test_get_native_filters_params_missing_column_name():
    """A missing columnName falls back to an empty string via .get()."""
    schedule = ReportSchedule()
    native_filter = {
        "nativeFilterId": "F1",
        "filterType": "filter_select",
        "filterValues": ["v"],
    }
    schedule.extra = {"dashboard": {"nativeFilters": [native_filter]}}
    params, warnings = schedule.get_native_filters_params()
    assert warnings == []
    assert "F1" in params
def test_generate_native_filter_range_null_column():
    """A None column_name on filter_range falls back to empty strings."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "F5", "filter_range", None, [10, 100]
    )
    assert warning is None
    range_filters = result["F5"]["extraFormData"]["filters"]
    assert range_filters[0]["col"] == ""
    assert range_filters[1]["col"] == ""
def test_generate_native_filter_time_empty_id():
    """Empty filter ID for filter_time exercises the ``or ""`` fallback."""
    schedule = ReportSchedule()
    result, warning = schedule._generate_native_filter(
        "", "filter_time", "ignored", ["Last week"]
    )
    assert warning is None
    assert "" in result
    assert result[""]["id"] == ""

View File

@@ -377,3 +377,57 @@ def test_send_slack_no_feature_flag(
```
""",
)
@patch("superset.reports.notifications.slackv2.g")
@patch("superset.reports.notifications.slackv2.get_slack_client")
def test_slackv2_send_without_channels_raises(
    slack_client_mock: MagicMock,
    flask_global_mock: MagicMock,
    mock_header_data,
) -> None:
    """An empty recipient target must raise NotificationParamException on send."""
    from superset.reports.models import ReportRecipients, ReportRecipientType
    from superset.reports.notifications.base import NotificationContent
    from superset.reports.notifications.exceptions import NotificationParamException
    flask_global_mock.logs_context = {}
    content = NotificationContent(name="test", header_data=mock_header_data)
    # Recipient config has an empty "target" — no channel to deliver to.
    notification = SlackV2Notification(
        recipient=ReportRecipients(
            type=ReportRecipientType.SLACKV2,
            recipient_config_json='{"target": ""}',
        ),
        content=content,
    )
    with pytest.raises(NotificationParamException, match="No recipients"):
        notification.send()
@patch("superset.reports.notifications.slackv2.g")
@patch("superset.reports.notifications.slackv2.get_slack_client")
def test_slack_mixin_get_body_truncates_large_table(
    slack_client_mock: MagicMock,
    flask_global_mock: MagicMock,
    mock_header_data,
) -> None:
    """Embedded tables exceeding the Slack message limit get a truncation note."""
    from superset.reports.models import ReportRecipients, ReportRecipientType
    from superset.reports.notifications.base import NotificationContent
    flask_global_mock.logs_context = {}
    # Create a large DataFrame that exceeds the 4000-char message limit
    large_df = pd.DataFrame({"col_" + str(i): range(100) for i in range(10)})
    content = NotificationContent(
        name="test",
        header_data=mock_header_data,
        embedded_data=large_df,
        description="desc",
    )
    notification = SlackV2Notification(
        recipient=ReportRecipients(
            type=ReportRecipientType.SLACKV2,
            recipient_config_json='{"target": "some_channel"}',
        ),
        content=content,
    )
    # The mixin body builder should cut the table and flag the truncation.
    body = notification._get_body(content=content)
    assert "(table was truncated)" in body

View File

@@ -19,7 +19,7 @@ import pytest
from marshmallow import ValidationError
from pytest_mock import MockerFixture
from superset.reports.schemas import ReportSchedulePostSchema
from superset.reports.schemas import ReportSchedulePostSchema, ReportSchedulePutSchema
def test_report_post_schema_custom_width_validation(mocker: MockerFixture) -> None:
@@ -75,3 +75,184 @@ def test_report_post_schema_custom_width_validation(mocker: MockerFixture) -> No
assert excinfo.value.messages == {
"custom_width": ["Screenshot width must be between 100px and 200px"]
}
# Smallest payload satisfying ReportSchedulePostSchema's required fields.
MINIMAL_POST_PAYLOAD = {
    "type": "Report",
    "name": "A report",
    "crontab": "* * * * *",
    "timezone": "America/Los_Angeles",
}
# App-config overrides pinning the custom screenshot width bounds under test.
CUSTOM_WIDTH_CONFIG = {
    "ALERT_REPORTS_MIN_CUSTOM_SCREENSHOT_WIDTH": 600,
    "ALERT_REPORTS_MAX_CUSTOM_SCREENSHOT_WIDTH": 2400,
}
@pytest.mark.parametrize(
    "schema_class,payload_base",
    [
        (ReportSchedulePostSchema, MINIMAL_POST_PAYLOAD),
        (ReportSchedulePutSchema, {}),
    ],
    ids=["post", "put"],
)
@pytest.mark.parametrize(
    "width,should_pass",
    [
        (599, False),
        (600, True),
        (2400, True),
        (2401, False),
        (None, True),
    ],
)
def test_custom_width_boundary_values(
    mocker: MockerFixture,
    schema_class: type,
    payload_base: dict[str, object],
    width: int | None,
    should_pass: bool,
) -> None:
    """Boundary check for custom_width on both the POST and PUT schemas."""
    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
    payload = {**payload_base, "custom_width": width}
    schema = schema_class()
    if not should_pass:
        with pytest.raises(ValidationError) as exc:
            schema.load(payload)
        assert "custom_width" in exc.value.messages
    else:
        schema.load(payload)
def test_working_timeout_validation(mocker: MockerFixture) -> None:
    """working_timeout: POST enforces min=1; PUT additionally allows None."""
    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
    post_schema = ReportSchedulePostSchema()
    put_schema = ReportSchedulePutSchema()
    # POST rejects any value below the minimum of 1.
    for invalid_timeout in (0, -1):
        with pytest.raises(ValidationError) as exc:
            post_schema.load(
                {**MINIMAL_POST_PAYLOAD, "working_timeout": invalid_timeout}
            )
        assert "working_timeout" in exc.value.messages
    # POST accepts the minimum itself.
    post_schema.load({**MINIMAL_POST_PAYLOAD, "working_timeout": 1})
    # PUT allows clearing the value entirely (allow_none=True).
    put_schema.load({"working_timeout": None})
def test_log_retention_post_vs_put_parity(mocker: MockerFixture) -> None:
    """log_retention minimums differ: POST requires >=1 while PUT allows 0."""
    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
    post_schema = ReportSchedulePostSchema()
    put_schema = ReportSchedulePutSchema()
    # POST rejects zero (min=1) but accepts the minimum.
    with pytest.raises(ValidationError) as exc:
        post_schema.load({**MINIMAL_POST_PAYLOAD, "log_retention": 0})
    assert "log_retention" in exc.value.messages
    post_schema.load({**MINIMAL_POST_PAYLOAD, "log_retention": 1})
    # PUT accepts zero (min=0).
    put_schema.load({"log_retention": 0})
def test_report_type_disallows_database(mocker: MockerFixture) -> None:
    """A Report-type POST payload must reject a database reference."""
    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
    post_schema = ReportSchedulePostSchema()
    with pytest.raises(ValidationError) as exc:
        post_schema.load({**MINIMAL_POST_PAYLOAD, "database": 1})
    assert "database" in exc.value.messages
def test_alert_type_allows_database(mocker: MockerFixture) -> None:
    """Alert type should accept database; only Report type blocks it."""
    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
    post_schema = ReportSchedulePostSchema()
    loaded = post_schema.load({**MINIMAL_POST_PAYLOAD, "type": "Alert", "database": 1})
    assert loaded["database"] == 1
# ---------------------------------------------------------------------------
# Phase 1b gap closure: crontab validator, name length, PUT parity
# ---------------------------------------------------------------------------
@pytest.mark.parametrize(
    "crontab,should_pass",
    [
        ("* * * * *", True),
        ("0 0 * * 0", True),
        ("*/5 * * * *", True),
        ("not a cron", False),
        ("* * * *", False),  # too few fields
        ("", False),
    ],
    ids=["every-min", "weekly", "every-5", "invalid-text", "too-few-fields", "empty"],
)
def test_crontab_validation(
    mocker: MockerFixture,
    crontab: str,
    should_pass: bool,
) -> None:
    """Valid cron expressions load intact; malformed ones raise on 'crontab'."""
    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
    schema = ReportSchedulePostSchema()
    payload = {**MINIMAL_POST_PAYLOAD, "crontab": crontab}
    if not should_pass:
        with pytest.raises(ValidationError) as exc:
            schema.load(payload)
        assert "crontab" in exc.value.messages
    else:
        loaded = schema.load(payload)
        assert loaded["crontab"] == crontab
def test_name_empty_rejected(mocker: MockerFixture) -> None:
    """An empty report name fails schema validation."""
    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
    post_schema = ReportSchedulePostSchema()
    with pytest.raises(ValidationError) as exc:
        post_schema.load({**MINIMAL_POST_PAYLOAD, "name": ""})
    assert "name" in exc.value.messages
def test_name_at_max_length_accepted(mocker: MockerFixture) -> None:
    """A 150-character name sits exactly at the limit and is accepted."""
    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
    post_schema = ReportSchedulePostSchema()
    boundary_name = "x" * 150
    loaded = post_schema.load({**MINIMAL_POST_PAYLOAD, "name": boundary_name})
    assert loaded["name"] == boundary_name
def test_name_over_max_length_rejected(mocker: MockerFixture) -> None:
    """A 151-character name exceeds the limit and is rejected."""
    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
    post_schema = ReportSchedulePostSchema()
    with pytest.raises(ValidationError) as exc:
        post_schema.load({**MINIMAL_POST_PAYLOAD, "name": "x" * 151})
    assert "name" in exc.value.messages
def test_put_schema_allows_database_on_report_type(mocker: MockerFixture) -> None:
    """PUT schema lacks validate_report_references — database on Report type is
    accepted (documents current behavior; POST schema correctly rejects this)."""
    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
    # PUT: the reference check is absent, so the payload loads cleanly.
    loaded = ReportSchedulePutSchema().load({"type": "Report", "database": 1})
    assert loaded["database"] == 1
    # POST: the same combination is rejected — verify the asymmetry.
    with pytest.raises(ValidationError) as exc:
        ReportSchedulePostSchema().load({**MINIMAL_POST_PAYLOAD, "database": 1})
    assert "database" in exc.value.messages