Compare commits

...

6 Commits

Author SHA1 Message Date
dependabot[bot]
d2da14442d chore(deps): bump actions/dependency-review-action from 4.9.0 to 5.0.0
Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.9.0 to 5.0.0.
- [Release notes](https://github.com/actions/dependency-review-action/releases)
- [Commits](2031cfc080...a1d282b36b)

---
updated-dependencies:
- dependency-name: actions/dependency-review-action
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-05-11 08:41:10 +00:00
Evan Rusackas
f81821086a chore(releasing): fix email parsing in verify_release.py (#39602)
Co-authored-by: Claude Opus 4.7 <noreply@anthropic.com>
2026-05-09 08:57:33 -07:00
dependabot[bot]
f67dd4a8f3 chore(deps): bump geostyler from 18.5.0 to 18.5.1 in /superset-frontend (#39702)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-05-08 16:17:31 -07:00
Maxime Beauchemin
68fa8e2733 fix(viz): flatten MultiIndex columns in Time-Series Table for multiple Group By (#37869)
Co-authored-by: Claude Opus 4 <noreply@anthropic.com>
Co-authored-by: Evan Rusackas <evan@preset.io>
2026-05-08 16:11:13 -07:00
Maxime Beauchemin
a60860c969 fix(table): fall back to datasource columns for conditional formatting when query results are empty (#39345)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: Joe Li <joe@preset.io>
2026-05-08 16:10:41 -07:00
Maxime Beauchemin
d023fe1703 fix(trino/presto): use equality for boolean filters to support computed columns (#39500) 2026-05-08 16:10:27 -07:00
11 changed files with 397 additions and 46 deletions

View File

@@ -29,7 +29,7 @@ jobs:
- name: "Checkout Repository"
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: "Dependency Review"
uses: actions/dependency-review-action@2031cfc080254a8a887f58cffee85186f0e49e48 # v4.9.0
uses: actions/dependency-review-action@a1d282b36b6f3519aa1f3fc636f609c47dddb294 # v5.0.0
continue-on-error: true
with:
fail-on-severity: critical

View File

@@ -56,8 +56,33 @@ def verify_sha512(filename: str) -> str:
# Part 2: Verify RSA key - this is the same as running `gpg --verify {release}.asc {release}` and comparing the RSA key and email address against the KEYS file # noqa: E501
KEYS_URL = "https://downloads.apache.org/superset/KEYS"
def ensure_keys_imported() -> None:
    """Import the Apache Superset KEYS file into the local GPG keyring.

    Without this, `gpg --verify` returns "No public key" and the signature
    cannot actually be verified — only the key ID in the signature metadata
    is visible.
    """
    # Best-effort: a failed download degrades to a warning so the caller can
    # still report its own "No public key" hint instead of crashing here.
    try:
        keys = requests.get(KEYS_URL, timeout=30)
    except requests.RequestException as exc:
        print(f"Warning: could not fetch KEYS file for import: {exc}")
        return
    if keys.status_code != 200:
        print(f"Warning: could not fetch KEYS file (HTTP {keys.status_code})")
        return
    result = subprocess.run(  # noqa: S603
        ["gpg", "--import"],  # noqa: S607
        input=keys.content,
        capture_output=True,
    )
    # Surface import failures instead of silently ignoring them; otherwise a
    # subsequent `gpg --verify` retry fails with no explanation of why the
    # key is still missing.
    if result.returncode != 0:
        print(
            "Warning: `gpg --import` failed: "
            f"{result.stderr.decode(errors='replace').strip()}"
        )
def get_gpg_info(filename: str) -> tuple[Optional[str], Optional[str]]:
    """Run the GPG verify command and extract RSA/EDDSA key and email address.

    Returns a ``(key, email)`` tuple; either element is ``None`` when it
    could not be parsed from the ``gpg --verify`` stderr output.
    """
    asc_filename = filename + ".asc"
    result = subprocess.run(  # noqa: S603
        ["gpg", "--verify", asc_filename, filename],  # noqa: S607
        capture_output=True,
    )
    output = result.stderr.decode()

    # If no public key was available, import KEYS and retry so that
    # `Good signature from "Name <email>"` appears in the output.
    if "No public key" in output:
        ensure_keys_imported()
        result = subprocess.run(  # noqa: S603
            ["gpg", "--verify", asc_filename, filename],  # noqa: S607
            capture_output=True,
        )
        output = result.stderr.decode()

    rsa_key = re.search(r"RSA key ([0-9A-F]+)", output)
    eddsa_key = re.search(r"EDDSA key ([0-9A-F]+)", output)

    # Try multiple patterns — `Good signature from` is the most reliable
    # source of the email; `issuer` is a fallback for older gpg output.
    email_patterns = (
        r'Good signature from ".*?<([^>]+)>"',
        r'aka ".*?<([^>]+)>"',
        r'issuer "([^"]+)"',
    )
    email_result: Optional[str] = None
    for pattern in email_patterns:
        match = re.search(pattern, output)
        if match:
            email_result = match.group(1)
            break

    rsa_key_result = rsa_key.group(1) if rsa_key else None
    eddsa_key_result = eddsa_key.group(1) if eddsa_key else None
    key_result = rsa_key_result or eddsa_key_result

    # Debugging:
    if key_result:
        print("RSA or EDDSA Key found")
    else:
        print("Warning: No RSA or EDDSA key found in GPG verification output.")
    if email_result:
        print(f"Email found: {email_result}")
    else:
        print("Warning: No email address found in GPG verification output.")
    # NOTE(review): the diff rendering left this hint's nesting ambiguous —
    # it is emitted whenever the (possibly retried) output still lacks a key.
    if "No public key" in output:
        print(
            "Hint: public key is not in your keyring. Import it with:\n"
            f"  curl -s {KEYS_URL} | gpg --import"
        )
    return key_result, email_result

View File

@@ -96,7 +96,7 @@
"fs-extra": "^11.3.4",
"fuse.js": "^7.3.0",
"geolib": "^3.3.14",
"geostyler": "^18.5.0",
"geostyler": "^18.5.1",
"geostyler-data": "^1.1.0",
"geostyler-openlayers-parser": "^5.7.0",
"geostyler-style": "11.0.2",
@@ -24760,9 +24760,9 @@
"license": "MIT"
},
"node_modules/geostyler": {
"version": "18.5.0",
"resolved": "https://registry.npmjs.org/geostyler/-/geostyler-18.5.0.tgz",
"integrity": "sha512-azjLMEhrTQot+pU3phfSrUZI7CdetyAl7JNAnxrGaPA/E/5mmyoPQugZso3CfIuIBwOtFLmfB36SLE/FeGFakA==",
"version": "18.5.1",
"resolved": "https://registry.npmjs.org/geostyler/-/geostyler-18.5.1.tgz",
"integrity": "sha512-5+vLuDo1oR4QQTnrfkccIQSe3qEn0ytV9dLiFFhnxhPdziv/Wp3vKNhJZ37MUF5yIj2ISWZ+q/VmSNH6ifvWpg==",
"license": "BSD-2-Clause",
"dependencies": {
"@ant-design/icons": "^5.5.1",

View File

@@ -177,7 +177,7 @@
"fs-extra": "^11.3.4",
"fuse.js": "^7.3.0",
"geolib": "^3.3.14",
"geostyler": "^18.5.0",
"geostyler": "^18.5.1",
"geostyler-data": "^1.1.0",
"geostyler-openlayers-parser": "^5.7.0",
"geostyler-style": "11.0.2",

View File

@@ -796,45 +796,63 @@ const config: ControlPanelConfig = {
},
);
}
const { colnames, coltypes } =
const { colnames: queryColnames, coltypes: queryColtypes } =
chart?.queriesResponse?.[0] ?? {};
const allColumns =
Array.isArray(colnames) && Array.isArray(coltypes)
? [
{
value: ObjectFormattingEnum.ENTIRE_ROW,
label: t('entire row'),
dataType: GenericDataType.String,
},
...colnames.map((colname: string, index: number) => ({
const hasQueryColumns =
Array.isArray(queryColnames) &&
Array.isArray(queryColtypes) &&
queryColnames.length > 0;
// Fall back to datasource columns when query results are empty
const datasourceColumns = ensureIsArray(
(explore?.datasource as Dataset)?.columns,
);
const colnames = hasQueryColumns
? queryColnames
: datasourceColumns.map((col: ColumnMeta) => col.column_name);
const coltypes = hasQueryColumns
? queryColtypes
: datasourceColumns.map(
(col: ColumnMeta) =>
col.type_generic ?? GenericDataType.String,
);
const hasColumns = colnames.length > 0;
const allColumns = hasColumns
? [
{
value: ObjectFormattingEnum.ENTIRE_ROW,
label: t('entire row'),
dataType: GenericDataType.String,
},
...colnames.map((colname: string, index: number) => ({
value: colname,
label: Array.isArray(verboseMap)
? colname
: (verboseMap[colname] ?? colname),
dataType: coltypes[index],
})),
]
: [];
const numericColumns = hasColumns
? colnames.reduce((acc, colname, index) => {
if (
coltypes[index] === GenericDataType.Numeric ||
(!hasTimeComparison &&
(coltypes[index] === GenericDataType.String ||
coltypes[index] === GenericDataType.Boolean))
) {
acc.push({
value: colname,
label: Array.isArray(verboseMap)
? colname
: (verboseMap[colname] ?? colname),
dataType: coltypes[index],
})),
]
: [];
const numericColumns =
Array.isArray(colnames) && Array.isArray(coltypes)
? colnames.reduce((acc, colname, index) => {
if (
coltypes[index] === GenericDataType.Numeric ||
(!hasTimeComparison &&
(coltypes[index] === GenericDataType.String ||
coltypes[index] === GenericDataType.Boolean))
) {
acc.push({
value: colname,
label: Array.isArray(verboseMap)
? colname
: (verboseMap[colname] ?? colname),
dataType: coltypes[index],
});
}
return acc;
}, [])
: [];
});
}
return acc;
}, [])
: [];
const columnOptions = hasTimeComparison
? processComparisonColumns(
numericColumns || [],

View File

@@ -25,6 +25,7 @@ import {
ControlPanelState,
ControlState,
ColorSchemeEnum,
ObjectFormattingEnum,
} from '@superset-ui/chart-controls';
import config from '../src/controlPanel';
@@ -55,11 +56,12 @@ const createMockControlState = (value: string[] | undefined): ControlState => ({
const createMockExplore = (
timeCompareValue: string[] | undefined,
datasourceColumns: Partial<Dataset>['columns'] = [],
): ControlPanelState => ({
slice: { slice_id: 123 },
datasource: {
verbose_map: { col1: 'Column 1', col2: 'Column 2' },
columns: [],
columns: datasourceColumns,
} as Partial<Dataset> as Dataset,
controls: {
time_compare: createMockControlState(timeCompareValue),
@@ -206,3 +208,144 @@ test('static extraColorChoices removed from config', () => {
expect(controlConfig?.extraColorChoices).toBeUndefined();
});
test('columnOptions falls back to datasource columns when queriesResponse is empty', () => {
  // The control must exist before exercising its mapStateToProps.
  const formattingControl = findConditionalFormattingControl();
  expect(formattingControl).toBeTruthy();

  const dsColumns = [
    { column_name: 'revenue', type_generic: GenericDataType.Numeric },
    { column_name: 'name', type_generic: GenericDataType.String },
  ];
  const exploreState = createMockExplore(undefined, dsColumns);
  const emptyChart = { chartStatus: 'success' as const, queriesResponse: null };

  const mapped = formattingControl!.mapStateToProps!(
    exploreState,
    createMockControlStateForConditionalFormatting(),
    emptyChart,
  );

  // Both option lists must be populated from the datasource columns.
  const expectedEntries = [
    expect.objectContaining({ value: 'revenue' }),
    expect.objectContaining({ value: 'name' }),
  ];
  expect(mapped.columnOptions).toEqual(expect.arrayContaining(expectedEntries));
  expect(mapped.allColumns).toEqual(expect.arrayContaining(expectedEntries));
});
test('columnOptions prefers queriesResponse over datasource columns', () => {
  const formattingControl = findConditionalFormattingControl();
  expect(formattingControl).toBeTruthy();

  // The datasource deliberately includes a column absent from query results.
  const exploreState = createMockExplore(undefined, [
    { column_name: 'revenue', type_generic: GenericDataType.Numeric },
    { column_name: 'extra_col', type_generic: GenericDataType.String },
  ]);

  const mapped = formattingControl!.mapStateToProps!(
    exploreState,
    createMockControlStateForConditionalFormatting(),
    createMockChart(),
  );

  // Query-derived columns win…
  expect(mapped.columnOptions).toEqual(
    expect.arrayContaining([
      expect.objectContaining({ value: 'col1' }),
      expect.objectContaining({ value: 'col2' }),
    ]),
  );
  // …and datasource-only columns are ignored.
  expect(mapped.columnOptions).not.toEqual(
    expect.arrayContaining([expect.objectContaining({ value: 'extra_col' })]),
  );
});
test('columnOptions falls back to datasource when queriesResponse has empty colnames', () => {
  const formattingControl = findConditionalFormattingControl();
  expect(formattingControl).toBeTruthy();

  const exploreState = createMockExplore(undefined, [
    { column_name: 'revenue', type_generic: GenericDataType.Numeric },
  ]);
  // A successful query that returned zero columns should still trigger
  // the datasource fallback.
  const emptyResultChart = {
    chartStatus: 'success' as const,
    queriesResponse: [{ colnames: [], coltypes: [] }],
  };

  const mapped = formattingControl!.mapStateToProps!(
    exploreState,
    createMockControlStateForConditionalFormatting(),
    emptyResultChart,
  );

  expect(mapped.columnOptions).toEqual(
    expect.arrayContaining([expect.objectContaining({ value: 'revenue' })]),
  );
});
test('columnOptions returns empty when both queriesResponse and datasource have no columns', () => {
  const formattingControl = findConditionalFormattingControl();
  expect(formattingControl).toBeTruthy();

  // No query results and an empty datasource → both lists stay empty.
  const mapped = formattingControl!.mapStateToProps!(
    createMockExplore(undefined, []),
    createMockControlStateForConditionalFormatting(),
    { chartStatus: 'success' as const, queriesResponse: null },
  );

  expect(mapped.columnOptions).toEqual([]);
  expect(mapped.allColumns).toEqual([]);
});
test('allColumns includes ENTIRE_ROW when falling back to datasource columns', () => {
  const formattingControl = findConditionalFormattingControl();
  expect(formattingControl).toBeTruthy();

  const mapped = formattingControl!.mapStateToProps!(
    createMockExplore(undefined, [
      { column_name: 'revenue', type_generic: GenericDataType.Numeric },
    ]),
    createMockControlStateForConditionalFormatting(),
    { chartStatus: 'success' as const, queriesResponse: null },
  );

  // The synthetic "entire row" option must survive the fallback path.
  expect(mapped.allColumns).toEqual(
    expect.arrayContaining([
      expect.objectContaining({ value: ObjectFormattingEnum.ENTIRE_ROW }),
    ]),
  );
});
test('columnOptions defaults type_generic to String when missing from datasource columns', () => {
  const formattingControl = findConditionalFormattingControl();
  expect(formattingControl).toBeTruthy();

  // The datasource column carries no type_generic at all.
  const mapped = formattingControl!.mapStateToProps!(
    createMockExplore(undefined, [{ column_name: 'untyped_col' }]),
    createMockControlStateForConditionalFormatting(),
    { chartStatus: 'success' as const, queriesResponse: null },
  );

  expect(mapped.columnOptions).toEqual(
    expect.arrayContaining([
      expect.objectContaining({
        value: 'untyped_col',
        dataType: GenericDataType.String,
      }),
    ]),
  );
});

View File

@@ -165,6 +165,10 @@ class PrestoBaseEngineSpec(BaseEngineSpec, metaclass=ABCMeta):
supports_dynamic_schema = True
supports_catalog = supports_dynamic_catalog = supports_cross_catalog_queries = True
# Presto/Trino don't reliably support IS true/false on computed boolean
# expressions (e.g. columns defined as `(expiration = 1) AS expiration`),
# which raises a query error. Use = true/false instead.
use_equality_for_boolean_filters = True
column_type_mappings = (
(

View File

@@ -81,6 +81,10 @@ from superset.utils.core import (
)
from superset.utils.date_parser import get_since_until, parse_past_timedelta
from superset.utils.hashing import hash_from_str
from superset.utils.pandas_postprocessing.utils import (
escape_separator,
FLAT_COLUMN_SEPARATOR,
)
if TYPE_CHECKING:
from superset.connectors.sqla.models import BaseDatasource
@@ -763,6 +767,11 @@ class TimeTableViz(BaseViz):
pt = df.pivot_table(index=DTTM_ALIAS, columns=columns, values=values)
pt.index = pt.index.map(str)
pt = pt.sort_index()
if isinstance(pt.columns, pd.MultiIndex):
pt.columns = [
FLAT_COLUMN_SEPARATOR.join(escape_separator(str(s)) for s in col)
for col in pt.columns
]
return {
"records": pt.to_dict(orient="index"),
"columns": list(pt.columns),

View File

@@ -28,6 +28,7 @@ import superset.viz as viz
from flask import current_app
from superset.exceptions import QueryObjectValidationError, SpatialException
from superset.utils.core import DTTM_ALIAS
from superset.utils.pandas_postprocessing.utils import FLAT_COLUMN_SEPARATOR
from tests.conftest import with_config
from .base_tests import SupersetTestCase
@@ -626,6 +627,44 @@ class TestTimeSeriesTableViz(SupersetTestCase):
}
assert expected == data["records"]
def test_get_data_multiple_group_by(self):
    """Columns from a two-level group-by must be flattened into strings."""
    sep = FLAT_COLUMN_SEPARATOR
    datasource = self.get_datasource_mock()
    viz_obj = viz.TimeTableViz(
        datasource,
        {"metrics": ["sum__A"], "groupby": ["groupby1", "groupby2"]},
    )

    t1, t2 = pd.Timestamp("2000"), pd.Timestamp("2002")
    df = pd.DataFrame(
        {
            DTTM_ALIAS: [t1, t1, t1, t1, t2, t2, t2, t2],
            "sum__A": [15, 20, 25, 30, 35, 40, 45, 50],
            "groupby1": ["a1", "a2", "a1", "a2", "a1", "a2", "a1", "a2"],
            "groupby2": ["b1", "b1", "b2", "b2", "b1", "b1", "b2", "b2"],
        }
    )
    data = viz_obj.get_data(df)

    # Columns should be flattened strings, not tuples
    assert {
        f"a1{sep}b1",
        f"a1{sep}b2",
        f"a2{sep}b1",
        f"a2{sep}b2",
    } == set(data["columns"])

    fmt = "%Y-%m-%d %H:%M:%S"
    assert data["records"] == {
        t1.strftime(fmt): {
            f"a1{sep}b1": 15,
            f"a1{sep}b2": 25,
            f"a2{sep}b1": 20,
            f"a2{sep}b2": 30,
        },
        t2.strftime(fmt): {
            f"a1{sep}b1": 35,
            f"a1{sep}b2": 45,
            f"a2{sep}b1": 40,
            f"a2{sep}b2": 50,
        },
    }
    assert data["is_group_by"] is True
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_throws_metrics_and_groupby(self, super_query_obj):
datasource = self.get_datasource_mock()

View File

@@ -342,3 +342,47 @@ SELECT * \nFROM my_catalog.my_schema.my_table
LIMIT :param_1
""".strip()
)
def test_handle_boolean_filter() -> None:
    """
    Test that Presto uses equality operators for boolean filters instead of IS,
    since `col IS TRUE` can fail on computed boolean expressions like
    `(expiration = 1) AS expiration`.
    """
    from sqlalchemy import Boolean, Column, literal_column

    from superset.db_engine_specs.presto import PrestoEngineSpec
    from superset.utils.core import FilterOperator

    def compiled(expr) -> str:
        # Render the clause with literals inlined for a stable comparison.
        return str(expr.compile(compile_kwargs={"literal_binds": True}))

    bool_col = Column("test_col", Boolean)
    assert (
        compiled(
            PrestoEngineSpec.handle_boolean_filter(
                bool_col, FilterOperator.IS_TRUE, True
            )
        )
        == "test_col = true"
    )
    assert (
        compiled(
            PrestoEngineSpec.handle_boolean_filter(
                bool_col, FilterOperator.IS_FALSE, False
            )
        )
        == "test_col = false"
    )

    # Regression: the original bug was on computed boolean columns like
    # `(expiration = 1) AS expiration`. Verify the equality operator also
    # compiles correctly when the "column" is a computed expression.
    assert (
        compiled(
            PrestoEngineSpec.handle_boolean_filter(
                literal_column("(expiration = 1)"), FilterOperator.IS_TRUE, True
            )
        )
        == "(expiration = 1) = true"
    )

View File

@@ -1443,3 +1443,47 @@ def test_handle_cursor_commits_on_progress_text_change(
# There should be commits for progress_text changes
assert mock_db.session.commit.call_count >= 2
def test_handle_boolean_filter() -> None:
    """
    Test that Trino uses equality operators for boolean filters instead of IS,
    since `col IS TRUE` can fail on computed boolean expressions like
    `(expiration = 1) AS expiration`.
    """
    from sqlalchemy import Boolean, Column, literal_column

    from superset.db_engine_specs.trino import TrinoEngineSpec
    from superset.utils.core import FilterOperator

    def compiled(expr) -> str:
        # Render the clause with literals inlined for a stable comparison.
        return str(expr.compile(compile_kwargs={"literal_binds": True}))

    bool_col = Column("test_col", Boolean)
    assert (
        compiled(
            TrinoEngineSpec.handle_boolean_filter(
                bool_col, FilterOperator.IS_TRUE, True
            )
        )
        == "test_col = true"
    )
    assert (
        compiled(
            TrinoEngineSpec.handle_boolean_filter(
                bool_col, FilterOperator.IS_FALSE, False
            )
        )
        == "test_col = false"
    )

    # Regression: the original bug was on computed boolean columns like
    # `(expiration = 1) AS expiration`. Verify the equality operator also
    # compiles correctly when the "column" is a computed expression.
    assert (
        compiled(
            TrinoEngineSpec.handle_boolean_filter(
                literal_column("(expiration = 1)"), FilterOperator.IS_TRUE, True
            )
        )
        == "(expiration = 1) = true"
    )