Compare commits

..

7 Commits

Author SHA1 Message Date
Beto Dealmeida
35d1a6c21c fix: render Jinja templates in ORDER BY adhoc metrics
When processing adhoc metrics in ORDER BY clauses during query execution,
Jinja templates were not being rendered because `processed=True` was
passed without a template processor being provided.

This commit:
1. Updates adhoc_metric_to_sqla() to apply template processing even when
   processed=True (meaning SQL is already sanitized)
2. Passes template_processor when converting orderby adhoc metrics
3. Removes an obsolete test that expected the error handling which was
   removed in commit add087cbfe

The fix ensures that:
- During validation: SQL is sanitized but Jinja templates are preserved
  (template_processor=None)
- During execution: Jinja templates are rendered (template_processor
  provided, processed=True skips re-sanitization)

Fixes test: test_chart_data_table_chart_with_time_grain_filter

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-13 12:50:38 -05:00
Beto Dealmeida
add087cbfe Raise exceptions 2025-11-13 10:11:54 -05:00
Beto Dealmeida
29256a40bc Fix logic 2025-11-12 15:59:39 -05:00
Beto Dealmeida
89afd6fefc fix: prevent dict mutation during SQL expression sanitization
Address feedback on cache key stability fix:

1. **Fix in-place mutation during validation**
   - Changed _sanitize_metrics_expressions() to create new dicts instead of mutating
   - Changed _sanitize_orderby_expressions() to create new tuples/dicts
   - Prevents unexpected side effects when adhoc metrics are shared across queries

2. **Add comprehensive tests**
   - test_sql_expressions_processed_during_validation: Verifies SQL processing
   - test_validation_does_not_mutate_original_dicts: Ensures no mutation
   - test_validation_with_multiple_adhoc_metrics: Tests multiple metrics
   - test_validation_preserves_jinja_templates: Verifies Jinja preservation
   - test_validation_without_processing_methods: Tests graceful degradation
   - test_validation_serialization_stability: Tests JSON serialization stability

3. **Performance optimization**
   - Added early returns when no adhoc expressions to process
   - Reduces unnecessary function calls

This ensures that:
- Cache keys remain stable across validation and execution
- Original metric dicts are not mutated (preventing composite query issues)
- Jinja templates are preserved for runtime processing
- The fix works even when datasources lack processing methods

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-12 15:59:39 -05:00
Beto Dealmeida
4bcbe471cc Lint 2025-11-12 15:59:39 -05:00
Beto Dealmeida
47c58603a9 Fix style 2025-11-12 15:59:39 -05:00
Beto Dealmeida
6ec4a25295 fix: prevent cache key mismatch by processing SQL expressions during validation
Root Cause:
SQL expressions in adhoc metrics and orderby were being processed
(uppercased via sanitize_clause()) during query execution, causing
cache key mismatches in composite queries where:
1. Celery task processes and caches with processed expressions
2. Later requests compute cache keys from unprocessed expressions
3. Keys don't match → 422 error

The Fix:
Process SQL expressions during QueryObject.validate() BEFORE cache key
generation, ensuring both cache key computation and query execution use
the same processed expressions.

Changes:
- superset/common/query_object.py:
  * Add _sanitize_sql_expressions() called in validate()
  * Process metrics and orderby SQL expressions before caching

- superset/models/helpers.py:
  * Pass processed=True to adhoc_metric_to_sqla() in get_sqla_query()
  * Skip re-processing since validate() already handled it

- tests/unit_tests/connectors/sqla/test_orderby_mutation.py:
  * Add regression test documenting the fix
2025-11-12 15:59:39 -05:00
17 changed files with 629 additions and 606 deletions

View File

@@ -67,7 +67,6 @@ export function normalizeTimeColumn(
sqlExpression: formData.x_axis,
label: formData.x_axis,
expressionType: 'SQL',
isColumnReference: true,
};
}

View File

@@ -27,7 +27,6 @@ export interface AdhocColumn {
optionName?: string;
sqlExpression: string;
expressionType: 'SQL';
isColumnReference?: boolean;
columnType?: 'BASE_AXIS' | 'SERIES';
timeGrain?: string;
datasourceWarning?: boolean;
@@ -75,10 +74,6 @@ export function isAdhocColumn(column?: any): column is AdhocColumn {
);
}
export function isAdhocColumnReference(column?: any): column is AdhocColumn {
return isAdhocColumn(column) && column?.isColumnReference === true;
}
export function isQueryFormColumn(column: any): column is QueryFormColumn {
return isPhysicalColumn(column) || isAdhocColumn(column);
}

View File

@@ -86,7 +86,6 @@ test('should support different columns for x-axis and granularity', () => {
{
timeGrain: 'P1Y',
columnType: 'BASE_AXIS',
isColumnReference: true,
sqlExpression: 'time_column_in_x_axis',
label: 'time_column_in_x_axis',
expressionType: 'SQL',

View File

@@ -101,35 +101,36 @@ describe('queryObject conversion', () => {
it('should convert queryObject', () => {
const { queries } = buildQuery({ ...formData, x_axis: 'time_column' });
expect(queries[0]).toMatchObject({
granularity: 'time_column',
time_range: '1 year ago : 2013',
extras: { having: '', where: '', time_grain_sqla: 'P1Y' },
columns: [
{
columnType: 'BASE_AXIS',
expressionType: 'SQL',
label: 'time_column',
sqlExpression: 'time_column',
timeGrain: 'P1Y',
isColumnReference: true,
},
'col1',
],
series_columns: ['col1'],
metrics: ['count(*)'],
post_processing: [
{
operation: 'pivot',
options: {
aggregates: { 'count(*)': { operator: 'mean' } },
columns: ['col1'],
drop_missing_columns: true,
index: ['time_column'],
expect(queries[0]).toEqual(
expect.objectContaining({
granularity: 'time_column',
time_range: '1 year ago : 2013',
extras: { having: '', where: '', time_grain_sqla: 'P1Y' },
columns: [
{
columnType: 'BASE_AXIS',
expressionType: 'SQL',
label: 'time_column',
sqlExpression: 'time_column',
timeGrain: 'P1Y',
},
},
{ operation: 'flatten' },
],
});
'col1',
],
series_columns: ['col1'],
metrics: ['count(*)'],
post_processing: [
{
operation: 'pivot',
options: {
aggregates: { 'count(*)': { operator: 'mean' } },
columns: ['col1'],
drop_missing_columns: true,
index: ['time_column'],
},
},
{ operation: 'flatten' },
],
}),
);
});
});

View File

@@ -139,31 +139,6 @@ function cellWidth({
return perc2;
}
/**
* Sanitize a column identifier for use in HTML id attributes and CSS selectors.
* Replaces characters that are invalid in CSS selectors with safe alternatives.
*
* Note: The returned value should be prefixed with a string (e.g., "header-")
* to ensure it forms a valid HTML ID (IDs cannot start with a digit).
*
* Exported for testing.
*/
export function sanitizeHeaderId(columnId: string): string {
return (
columnId
// Semantic replacements first: preserve meaning in IDs for readability
// (e.g., '%pct_nice' → 'percentpct_nice' instead of '_pct_nice')
.replace(/%/g, 'percent')
.replace(/#/g, 'hash')
.replace(/△/g, 'delta')
// Generic sanitization for remaining special characters
.replace(/\s+/g, '_')
.replace(/[^a-zA-Z0-9_-]/g, '_')
.replace(/_+/g, '_') // Collapse consecutive underscores
.replace(/^_+|_+$/g, '') // Trim leading/trailing underscores
);
}
/**
* Cell left margin (offset) calculation for horizontal bar chart elements
* when alignPositiveNegative is not set
@@ -869,9 +844,6 @@ export default function TableChart<D extends DataRecord = DataRecord>(
}
}
// Cache sanitized header ID to avoid recomputing it multiple times
const headerId = sanitizeHeaderId(column.originalLabel ?? column.key);
return {
id: String(i), // to allow duplicate column keys
// must use custom accessor to allow `.` in column names
@@ -997,7 +969,7 @@ export default function TableChart<D extends DataRecord = DataRecord>(
}
const cellProps = {
'aria-labelledby': `header-${headerId}`,
'aria-labelledby': `header-${column.key}`,
role: 'cell',
// show raw number in title in case of numeric values
title: typeof value === 'number' ? String(value) : undefined,
@@ -1084,7 +1056,7 @@ export default function TableChart<D extends DataRecord = DataRecord>(
},
Header: ({ column: col, onClick, style, onDragStart, onDrop }) => (
<th
id={`header-${headerId}`}
id={`header-${column.originalLabel}`}
title={t('Shift + Click to sort by multiple columns')}
className={[className, col.isSorted ? 'is-sorted' : ''].join(' ')}
style={{

View File

@@ -18,93 +18,15 @@
*/
import '@testing-library/jest-dom';
import { render, screen } from '@superset-ui/core/spec';
import { cloneDeep } from 'lodash';
import TableChart, { sanitizeHeaderId } from '../src/TableChart';
import TableChart from '../src/TableChart';
import transformProps from '../src/transformProps';
import DateWithFormatter from '../src/utils/DateWithFormatter';
import testData from './testData';
import { ProviderWrapper } from './testHelpers';
test('sanitizeHeaderId should sanitize percent sign', () => {
expect(sanitizeHeaderId('%pct_nice')).toBe('percentpct_nice');
});
test('sanitizeHeaderId should sanitize hash/pound sign', () => {
expect(sanitizeHeaderId('# metric_1')).toBe('hash_metric_1');
});
test('sanitizeHeaderId should sanitize delta symbol', () => {
expect(sanitizeHeaderId('△ delta')).toBe('delta_delta');
});
test('sanitizeHeaderId should replace spaces with underscores', () => {
expect(sanitizeHeaderId('Main metric_1')).toBe('Main_metric_1');
expect(sanitizeHeaderId('multiple spaces')).toBe('multiple_spaces');
});
test('sanitizeHeaderId should handle multiple special characters', () => {
expect(sanitizeHeaderId('% #△ test')).toBe('percent_hashdelta_test');
expect(sanitizeHeaderId('% # △ test')).toBe('percent_hash_delta_test');
});
test('sanitizeHeaderId should preserve alphanumeric, underscore, and hyphen', () => {
expect(sanitizeHeaderId('valid-name_123')).toBe('valid-name_123');
});
test('sanitizeHeaderId should replace other special characters with underscore', () => {
expect(sanitizeHeaderId('col@name!test')).toBe('col_name_test');
expect(sanitizeHeaderId('test.column')).toBe('test_column');
});
test('sanitizeHeaderId should handle edge cases', () => {
expect(sanitizeHeaderId('')).toBe('');
expect(sanitizeHeaderId('simple')).toBe('simple');
});
test('sanitizeHeaderId should collapse consecutive underscores', () => {
expect(sanitizeHeaderId('test @@ space')).toBe('test_space');
expect(sanitizeHeaderId('col___name')).toBe('col_name');
expect(sanitizeHeaderId('a b c')).toBe('a_b_c');
expect(sanitizeHeaderId('test@@name')).toBe('test_name');
});
test('sanitizeHeaderId should remove leading underscores', () => {
expect(sanitizeHeaderId('@col')).toBe('col');
expect(sanitizeHeaderId('!revenue')).toBe('revenue');
expect(sanitizeHeaderId('@@test')).toBe('test');
expect(sanitizeHeaderId(' leading_spaces')).toBe('leading_spaces');
});
test('sanitizeHeaderId should remove trailing underscores', () => {
expect(sanitizeHeaderId('col@')).toBe('col');
expect(sanitizeHeaderId('revenue!')).toBe('revenue');
expect(sanitizeHeaderId('test@@')).toBe('test');
expect(sanitizeHeaderId('trailing_spaces ')).toBe('trailing_spaces');
});
test('sanitizeHeaderId should remove leading and trailing underscores', () => {
expect(sanitizeHeaderId('@col@')).toBe('col');
expect(sanitizeHeaderId('!test!')).toBe('test');
expect(sanitizeHeaderId(' spaced ')).toBe('spaced');
expect(sanitizeHeaderId('@@multiple@@')).toBe('multiple');
});
test('sanitizeHeaderId should handle inputs with only special characters', () => {
expect(sanitizeHeaderId('@')).toBe('');
expect(sanitizeHeaderId('@@')).toBe('');
expect(sanitizeHeaderId(' ')).toBe('');
expect(sanitizeHeaderId('!@$')).toBe('');
expect(sanitizeHeaderId('!@#$')).toBe('hash'); // # is replaced with 'hash' (semantic replacement)
// Semantic replacements produce readable output even when alone
expect(sanitizeHeaderId('%')).toBe('percent');
expect(sanitizeHeaderId('#')).toBe('hash');
expect(sanitizeHeaderId('△')).toBe('delta');
expect(sanitizeHeaderId('% # △')).toBe('percent_hash_delta');
});
describe('plugin-chart-table', () => {
describe('transformProps', () => {
test('should parse pageLength to pageSize', () => {
it('should parse pageLength to pageSize', () => {
expect(transformProps(testData.basic).pageSize).toBe(20);
expect(
transformProps({
@@ -120,13 +42,13 @@ describe('plugin-chart-table', () => {
).toBe(0);
});
test('should memoize data records', () => {
it('should memoize data records', () => {
expect(transformProps(testData.basic).data).toBe(
transformProps(testData.basic).data,
);
});
test('should memoize columns meta', () => {
it('should memoize columns meta', () => {
expect(transformProps(testData.basic).columns).toBe(
transformProps({
...testData.basic,
@@ -135,14 +57,14 @@ describe('plugin-chart-table', () => {
);
});
test('should format timestamp', () => {
it('should format timestamp', () => {
// eslint-disable-next-line no-underscore-dangle
const parsedDate = transformProps(testData.basic).data[0]
.__timestamp as DateWithFormatter;
expect(String(parsedDate)).toBe('2020-01-01 12:34:56');
expect(parsedDate.getTime()).toBe(1577882096000);
});
test('should process comparison columns when time_compare and comparison_type are set', () => {
it('should process comparison columns when time_compare and comparison_type are set', () => {
const transformedProps = transformProps(testData.comparison);
const comparisonColumns = transformedProps.columns.filter(
col =>
@@ -164,7 +86,7 @@ describe('plugin-chart-table', () => {
expect(comparisonColumns.some(col => col.label === '%')).toBe(true);
});
test('should not process comparison columns when time_compare is empty', () => {
it('should not process comparison columns when time_compare is empty', () => {
const propsWithoutTimeCompare = {
...testData.comparison,
rawFormData: {
@@ -187,7 +109,7 @@ describe('plugin-chart-table', () => {
expect(comparisonColumns.length).toBe(0);
});
test('should correctly apply column configuration for comparison columns', () => {
it('should correctly apply column configuration for comparison columns', () => {
const transformedProps = transformProps(testData.comparisonWithConfig);
const comparisonColumns = transformedProps.columns.filter(
@@ -225,7 +147,7 @@ describe('plugin-chart-table', () => {
expect(percentMetricConfig?.config).toEqual({ d3NumberFormat: '.3f' });
});
test('should correctly format comparison columns using getComparisonColFormatter', () => {
it('should correctly format comparison columns using getComparisonColFormatter', () => {
const transformedProps = transformProps(testData.comparisonWithConfig);
const comparisonColumns = transformedProps.columns.filter(
col =>
@@ -256,7 +178,7 @@ describe('plugin-chart-table', () => {
expect(formattedPercentMetric).toBe('0.123');
});
test('should set originalLabel for comparison columns when time_compare and comparison_type are set', () => {
it('should set originalLabel for comparison columns when time_compare and comparison_type are set', () => {
const transformedProps = transformProps(testData.comparison);
// Check if comparison columns are processed
@@ -343,7 +265,7 @@ describe('plugin-chart-table', () => {
});
describe('TableChart', () => {
test('render basic data', () => {
it('render basic data', () => {
render(
<TableChart {...transformProps(testData.basic)} sticky={false} />,
);
@@ -362,9 +284,12 @@ describe('plugin-chart-table', () => {
expect(cells[8]).toHaveTextContent('N/A');
});
test('render advanced data', () => {
it('render advanced data', () => {
render(
<TableChart {...transformProps(testData.advanced)} sticky={false} />,
<>
<TableChart {...transformProps(testData.advanced)} sticky={false} />
,
</>,
);
const secondColumnHeader = screen.getByText('Sum of Num');
expect(secondColumnHeader).toBeInTheDocument();
@@ -379,7 +304,7 @@ describe('plugin-chart-table', () => {
expect(cells[4]).toHaveTextContent('2.47k');
});
test('render advanced data with currencies', () => {
it('render advanced data with currencies', () => {
render(
ProviderWrapper({
children: (
@@ -399,7 +324,7 @@ describe('plugin-chart-table', () => {
expect(cells[4]).toHaveTextContent('$ 2.47k');
});
test('render data with a bigint value in a raw record mode', () => {
it('render data with a bigint value in a raw record mode', () => {
render(
ProviderWrapper({
children: (
@@ -420,7 +345,7 @@ describe('plugin-chart-table', () => {
expect(cells[3]).toHaveTextContent('1234567890123456789');
});
test('render raw data', () => {
it('render raw data', () => {
const props = transformProps({
...testData.raw,
rawFormData: { ...testData.raw.rawFormData },
@@ -437,7 +362,7 @@ describe('plugin-chart-table', () => {
expect(cells[1]).toHaveTextContent('0');
});
test('render raw data with currencies', () => {
it('render raw data with currencies', () => {
const props = transformProps({
...testData.raw,
rawFormData: {
@@ -462,7 +387,7 @@ describe('plugin-chart-table', () => {
expect(cells[2]).toHaveTextContent('$ 0');
});
test('render small formatted data with currencies', () => {
it('render small formatted data with currencies', () => {
const props = transformProps({
...testData.raw,
rawFormData: {
@@ -504,14 +429,14 @@ describe('plugin-chart-table', () => {
expect(cells[2]).toHaveTextContent('$ 0.61');
});
test('render empty data', () => {
it('render empty data', () => {
render(
<TableChart {...transformProps(testData.empty)} sticky={false} />,
);
expect(screen.getByText('No records found')).toBeInTheDocument();
});
test('render color with column color formatter', () => {
it('render color with column color formatter', () => {
render(
ProviderWrapper({
children: (
@@ -541,8 +466,8 @@ describe('plugin-chart-table', () => {
expect(getComputedStyle(screen.getByTitle('2467')).background).toBe('');
});
test('render cell without color', () => {
const dataWithEmptyCell = cloneDeep(testData.advanced.queriesData[0]);
it('render cell without color', () => {
const dataWithEmptyCell = testData.advanced.queriesData[0];
dataWithEmptyCell.data.push({
__timestamp: null,
name: 'Noah',
@@ -582,7 +507,7 @@ describe('plugin-chart-table', () => {
);
expect(getComputedStyle(screen.getByText('N/A')).background).toBe('');
});
test('should display original label in grouped headers', () => {
it('should display original label in grouped headers', () => {
const props = transformProps(testData.comparison);
render(<TableChart {...props} sticky={false} />);
@@ -597,142 +522,7 @@ describe('plugin-chart-table', () => {
expect(hasMetricHeaders).toBe(true);
});
test('should set meaningful header IDs for time-comparison columns', () => {
// Test time-comparison columns have proper IDs
// Uses originalLabel (e.g., "metric_1") which is sanitized for CSS safety
const props = transformProps(testData.comparison);
const { container } = render(<TableChart {...props} sticky={false} />);
const headers = screen.getAllByRole('columnheader');
// All headers should have IDs
const headersWithIds = headers.filter(header => header.id);
expect(headersWithIds.length).toBeGreaterThan(0);
// None should have "header-undefined"
const undefinedHeaders = headersWithIds.filter(header =>
header.id.includes('undefined'),
);
expect(undefinedHeaders).toHaveLength(0);
// Should have IDs based on sanitized originalLabel (e.g., "metric_1")
const hasMetricHeaders = headersWithIds.some(
header =>
header.id.includes('metric_1') || header.id.includes('metric_2'),
);
expect(hasMetricHeaders).toBe(true);
// CRITICAL: Verify sanitization - no spaces or special chars in any header ID
headersWithIds.forEach(header => {
// IDs must not contain spaces (would break CSS selectors and ARIA)
expect(header.id).not.toMatch(/\s/);
// IDs must not contain special chars like %, #, △
expect(header.id).not.toMatch(/[%#△]/);
// IDs should only contain valid characters: alphanumeric, underscore, hyphen
expect(header.id).toMatch(/^header-[a-zA-Z0-9_-]+$/);
});
// CRITICAL: Verify ALL cells reference valid headers (no broken ARIA)
const cellsWithLabels = container.querySelectorAll(
'td[aria-labelledby]',
);
cellsWithLabels.forEach(cell => {
const labelledBy = cell.getAttribute('aria-labelledby');
if (labelledBy) {
// Check that the ID doesn't contain spaces (would be interpreted as multiple IDs)
expect(labelledBy).not.toMatch(/\s/);
// Check that the ID doesn't contain special characters
expect(labelledBy).not.toMatch(/[%#△]/);
// Verify the referenced header actually exists
const referencedHeader = container.querySelector(
`#${CSS.escape(labelledBy)}`,
);
expect(referencedHeader).toBeTruthy();
}
});
});
test('should set meaningful header IDs for regular table columns', () => {
// Test regular (non-time-comparison) columns have proper IDs
// Uses fallback to column.key since originalLabel is undefined
const props = transformProps(testData.advanced);
const { container } = render(
ProviderWrapper({
children: <TableChart {...props} sticky={false} />,
}),
);
const headers = screen.getAllByRole('columnheader');
// Test 1: "name" column (regular string column)
const nameHeader = headers.find(header =>
header.textContent?.includes('name'),
);
expect(nameHeader).toBeDefined();
expect(nameHeader?.id).toBe('header-name'); // Falls back to column.key
// Verify cells reference this header correctly
const nameCells = container.querySelectorAll(
'td[aria-labelledby="header-name"]',
);
expect(nameCells.length).toBeGreaterThan(0);
// Test 2: "sum__num" column (metric with verbose map "Sum of Num")
const sumHeader = headers.find(header =>
header.textContent?.includes('Sum of Num'),
);
expect(sumHeader).toBeDefined();
expect(sumHeader?.id).toBe('header-sum_num'); // Falls back to column.key, consecutive underscores collapsed
// Verify cells reference this header correctly
const sumCells = container.querySelectorAll(
'td[aria-labelledby="header-sum_num"]',
);
expect(sumCells.length).toBeGreaterThan(0);
// Test 3: Verify NO headers have "undefined" in their ID
const undefinedHeaders = headers.filter(header =>
header.id?.includes('undefined'),
);
expect(undefinedHeaders).toHaveLength(0);
// Test 4: Verify ALL headers have proper IDs (no missing IDs)
const headersWithIds = headers.filter(header => header.id);
expect(headersWithIds.length).toBe(headers.length);
// Test 5: Verify ALL header IDs are properly sanitized
headersWithIds.forEach(header => {
// IDs must not contain spaces
expect(header.id).not.toMatch(/\s/);
// IDs must not contain special chars like % (from %pct_nice column)
expect(header.id).not.toMatch(/[%#△]/);
// IDs should only contain valid CSS selector characters
expect(header.id).toMatch(/^header-[a-zA-Z0-9_-]+$/);
});
// Test 6: Verify ALL cells reference valid headers (no broken ARIA)
const cellsWithLabels = container.querySelectorAll(
'td[aria-labelledby]',
);
cellsWithLabels.forEach(cell => {
const labelledBy = cell.getAttribute('aria-labelledby');
if (labelledBy) {
// Verify no spaces (would be interpreted as multiple IDs)
expect(labelledBy).not.toMatch(/\s/);
// Verify no special characters
expect(labelledBy).not.toMatch(/[%#△]/);
// Verify the referenced header actually exists
const referencedHeader = container.querySelector(
`#${CSS.escape(labelledBy)}`,
);
expect(referencedHeader).toBeTruthy();
}
});
});
test('render cell bars properly, and only when it is toggled on in both regular and percent metrics', () => {
it('render cell bars properly, and only when it is toggled on in both regular and percent metrics', () => {
const props = transformProps({
...testData.raw,
rawFormData: { ...testData.raw.rawFormData },
@@ -782,7 +572,7 @@ describe('plugin-chart-table', () => {
cells = document.querySelectorAll('td');
});
test('render color with string column color formatter(operator begins with)', () => {
it('render color with string column color formatter(operator begins with)', () => {
render(
ProviderWrapper({
children: (
@@ -814,7 +604,7 @@ describe('plugin-chart-table', () => {
);
});
test('render color with string column color formatter (operator ends with)', () => {
it('render color with string column color formatter (operator ends with)', () => {
render(
ProviderWrapper({
children: (
@@ -843,7 +633,7 @@ describe('plugin-chart-table', () => {
expect(getComputedStyle(screen.getByText('Joe')).background).toBe('');
});
test('render color with string column color formatter (operator containing)', () => {
it('render color with string column color formatter (operator containing)', () => {
render(
ProviderWrapper({
children: (
@@ -872,7 +662,7 @@ describe('plugin-chart-table', () => {
expect(getComputedStyle(screen.getByText('Joe')).background).toBe('');
});
test('render color with string column color formatter (operator not containing)', () => {
it('render color with string column color formatter (operator not containing)', () => {
render(
ProviderWrapper({
children: (
@@ -903,7 +693,7 @@ describe('plugin-chart-table', () => {
);
});
test('render color with string column color formatter (operator =)', () => {
it('render color with string column color formatter (operator =)', () => {
render(
ProviderWrapper({
children: (
@@ -934,7 +724,7 @@ describe('plugin-chart-table', () => {
);
});
test('render color with string column color formatter (operator None)', () => {
it('render color with string column color formatter (operator None)', () => {
render(
ProviderWrapper({
children: (
@@ -967,7 +757,7 @@ describe('plugin-chart-table', () => {
);
});
test('render color with column color formatter to entire row', () => {
it('render color with column color formatter to entire row', () => {
render(
ProviderWrapper({
children: (
@@ -1003,7 +793,7 @@ describe('plugin-chart-table', () => {
);
});
test('display text color using column color formatter', () => {
it('display text color using column color formatter', () => {
render(
ProviderWrapper({
children: (
@@ -1036,7 +826,7 @@ describe('plugin-chart-table', () => {
);
});
test('display text color using column color formatter for entire row', () => {
it('display text color using column color formatter for entire row', () => {
render(
ProviderWrapper({
children: (

View File

@@ -171,7 +171,7 @@ class ChartDataRestApi(ChartRestApi):
and query_context.result_format == ChartDataResultFormat.JSON
and query_context.result_type == ChartDataResultType.FULL
):
return self._run_async(json_body, command, add_extra_log_payload)
return self._run_async(json_body, command)
try:
form_data = json.loads(chart.params)
@@ -265,7 +265,7 @@ class ChartDataRestApi(ChartRestApi):
and query_context.result_format == ChartDataResultFormat.JSON
and query_context.result_type == ChartDataResultType.FULL
):
return self._run_async(json_body, command, add_extra_log_payload)
return self._run_async(json_body, command)
form_data = json_body.get("form_data")
return self._get_data_response(
@@ -334,10 +334,7 @@ class ChartDataRestApi(ChartRestApi):
return self._get_data_response(command, True)
def _run_async(
self,
form_data: dict[str, Any],
command: ChartDataCommand,
add_extra_log_payload: Callable[..., None] | None = None,
self, form_data: dict[str, Any], command: ChartDataCommand
) -> Response:
"""
Execute command as an async query.
@@ -346,10 +343,6 @@ class ChartDataRestApi(ChartRestApi):
with contextlib.suppress(ChartDataCacheLoadError):
result = command.run(force_cached=True)
if result is not None:
# Log is_cached if extra payload callback is provided.
# This indicates no async job was triggered - data was already cached
# and a synchronous response is being returned immediately.
self._log_is_cached(result, add_extra_log_payload)
return self._send_chart_response(result)
# Otherwise, kick off a background job to run the chart query.
# Clients will either poll or be notified of query completion,
@@ -431,25 +424,6 @@ class ChartDataRestApi(ChartRestApi):
return self.response_400(message=f"Unsupported result_format: {result_format}")
def _log_is_cached(
self,
result: dict[str, Any],
add_extra_log_payload: Callable[..., None] | None,
) -> None:
"""
Log is_cached values from query results to event logger.
Extracts is_cached from each query in the result and logs it.
If there's a single query, logs the boolean value directly.
If multiple queries, logs as a list.
"""
if add_extra_log_payload and result and "queries" in result:
is_cached_values = [query.get("is_cached") for query in result["queries"]]
if len(is_cached_values) == 1:
add_extra_log_payload(is_cached=is_cached_values[0])
elif is_cached_values:
add_extra_log_payload(is_cached=is_cached_values)
@event_logger.log_this
def _get_data_response(
self,
@@ -468,7 +442,12 @@ class ChartDataRestApi(ChartRestApi):
return self.response_400(message=exc.message)
# Log is_cached if extra payload callback is provided
self._log_is_cached(result, add_extra_log_payload)
if add_extra_log_payload and result and "queries" in result:
is_cached_values = [query.get("is_cached") for query in result["queries"]]
if len(is_cached_values) == 1:
add_extra_log_payload(is_cached=is_cached_values[0])
elif is_cached_values:
add_extra_log_payload(is_cached=is_cached_values)
return self._send_chart_response(result, form_data, datasource)

View File

@@ -193,7 +193,7 @@ class QueryObject: # pylint: disable=too-many-instance-attributes
return isinstance(metric, str) or is_adhoc_metric(metric)
self.metrics = metrics and [
x if is_str_or_adhoc(x) else x["label"] # type: ignore
x if is_str_or_adhoc(x) else x["label"] # type: ignore[misc,index]
for x in metrics
]
@@ -285,6 +285,7 @@ class QueryObject: # pylint: disable=too-many-instance-attributes
self._validate_no_have_duplicate_labels()
self._validate_time_offsets()
self._sanitize_filters()
self._sanitize_sql_expressions()
return None
except QueryObjectValidationError as ex:
if raise_exceptions:
@@ -359,6 +360,95 @@ class QueryObject: # pylint: disable=too-many-instance-attributes
except QueryClauseValidationException as ex:
raise QueryObjectValidationError(ex.message) from ex
def _sanitize_sql_expressions(self) -> None:
"""
Sanitize SQL expressions in adhoc metrics and orderby for consistent cache keys.
This processes SQL expressions before cache key generation, preventing cache
mismatches due to later processing during query execution.
"""
if not self.datasource or not hasattr(
self.datasource,
"_process_sql_expression",
):
return
# Process adhoc metrics
if self.metrics:
self._sanitize_metrics_expressions()
# Process orderby - these may contain adhoc metrics
if self.orderby:
self._sanitize_orderby_expressions()
def _sanitize_metrics_expressions(self) -> None:
"""
Process SQL expressions in adhoc metrics.
Creates new metric dictionaries to avoid mutating shared references.
"""
# datasource is checked in parent method, assert for type checking
assert self.datasource is not None
if not self.metrics:
return
sanitized_metrics = []
for metric in self.metrics:
if not (is_adhoc_metric(metric) and isinstance(metric, dict)):
sanitized_metrics.append(metric)
continue
if sql_expr := metric.get("sqlExpression"):
processed = self.datasource._process_select_expression(
expression=sql_expr,
database_id=self.datasource.database_id,
engine=self.datasource.database.backend,
schema=self.datasource.schema,
template_processor=None,
)
if processed and processed != sql_expr:
# Create new dict to avoid mutating shared references
sanitized_metrics.append({**metric, "sqlExpression": processed})
else:
sanitized_metrics.append(metric)
else:
sanitized_metrics.append(metric)
self.metrics = sanitized_metrics
def _sanitize_orderby_expressions(self) -> None:
"""
Process SQL expressions in orderby items.
Creates new tuples and dictionaries to avoid mutating shared references.
"""
# datasource is checked in parent method, assert for type checking
assert self.datasource is not None
if not self.orderby:
return
sanitized_orderby = []
for col, ascending in self.orderby:
if not (isinstance(col, dict) and col.get("sqlExpression")):
sanitized_orderby.append((col, ascending))
continue
processed = self.datasource._process_orderby_expression(
expression=col["sqlExpression"],
database_id=self.datasource.database_id,
engine=self.datasource.database.backend,
schema=self.datasource.schema,
template_processor=None,
)
if processed and processed != col["sqlExpression"]:
# Create new dict to avoid mutating shared references
sanitized_orderby.append(
({**col, "sqlExpression": processed}, ascending) # type: ignore[arg-type]
)
else:
sanitized_orderby.append((col, ascending))
self.orderby = sanitized_orderby
def _validate_there_are_no_missing_series(self) -> None:
missing_series = [col for col in self.series_columns if col not in self.columns]
if missing_series:

View File

@@ -1502,14 +1502,8 @@ class SqlaTable(
"""
label = utils.get_column_name(col)
try:
sql_expression = col["sqlExpression"]
# For column references, conditionally quote identifiers that need it
if col.get("isColumnReference"):
sql_expression = self.database.quote_identifier(sql_expression)
expression = self._process_select_expression(
expression=sql_expression,
expression=col["sqlExpression"],
database_id=self.database_id,
engine=self.database.backend,
schema=self.schema,

View File

@@ -1241,6 +1241,10 @@ class ExploreMixin: # pylint: disable=too-many-public-methods
schema=self.schema,
template_processor=template_processor,
)
elif template_processor and expression:
# Even if already processed (sanitized), we still need to
# render Jinja templates
expression = template_processor.process_template(expression)
sqla_metric = literal_column(expression)
else:
@@ -1819,6 +1823,10 @@ class ExploreMixin: # pylint: disable=too-many-public-methods
for metric in metrics:
if utils.is_adhoc_metric(metric):
assert isinstance(metric, dict)
# SQL expressions are sanitized during QueryObject.validate() via
# _sanitize_sql_expressions(), but we still process here to handle
# Jinja templates. sanitize_clause() is idempotent so re-sanitizing
# is safe.
metrics_exprs.append(
self.adhoc_metric_to_sqla(
metric=metric,
@@ -1855,19 +1863,18 @@ class ExploreMixin: # pylint: disable=too-many-public-methods
col: Union[AdhocMetric, ColumnElement] = orig_col
if isinstance(col, dict):
col = cast(AdhocMetric, col)
if col.get("sqlExpression"):
col["sqlExpression"] = self._process_orderby_expression(
expression=col["sqlExpression"],
database_id=self.database_id,
engine=self.database.backend,
schema=self.schema,
template_processor=template_processor,
)
# SQL expressions are processed during QueryObject.validate() via
# _sanitize_sql_expressions() using ORDER BY wrapping. We pass
# processed=True to skip re-processing and avoid incorrect SELECT
# wrapping that breaks ORDER BY expressions. The removal of the
# _process_orderby_expression() call (which mutated the dict) prevents
# cache key mismatches.
if utils.is_adhoc_metric(col):
# add adhoc sort by column to columns_by_name if not exists
col = self.adhoc_metric_to_sqla(
col,
columns_by_name,
template_processor=template_processor,
processed=True,
)
# use the existing instance, if possible

View File

@@ -337,32 +337,16 @@ class SqlLabRestApi(BaseSupersetApi):
params = kwargs["rison"]
key = params.get("key")
rows = params.get("rows")
try:
result = SqlExecutionResultsCommand(key=key, rows=rows).run()
except Exception as ex:
logger.exception("Error fetching query results for key=%s", key)
return self.response_500(message=str(ex))
result = SqlExecutionResultsCommand(key=key, rows=rows).run()
# Using pessimistic json serialization since some database drivers can return
# unserializeable types at times
try:
payload = json.dumps(
result,
default=json.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
)
except Exception as ex:
logger.exception("Error serializing query results for key=%s", key)
return self.response_500(message="Unable to serialize query results")
# Use json_success with explicit Content-Type to ensure Flask 2.3+ correctly
# handles the response and doesn't trigger HTTP 406 errors due to content
# negotiation issues with Accept headers or proxy configurations
response = json_success(payload, 200)
# Explicitly set Content-Type as a safeguard against content negotiation issues
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
payload = json.dumps(
result,
default=json.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
)
return json_success(payload, 200)
@expose("/execute/", methods=("POST",))
@protect()
@@ -426,11 +410,8 @@ class SqlLabRestApi(BaseSupersetApi):
if command_result["status"] == SqlJsonExecutionStatus.QUERY_IS_RUNNING
else 200
)
# Return the execution result without special encoding
# Set explicit Content-Type to prevent Flask 2.3+ content negotiation issues
response = json_success(command_result["payload"], response_status)
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
# return the execution result without special encoding
return json_success(command_result["payload"], response_status)
except SqlLabException as ex:
payload = {"errors": [ex.to_dict()]}

View File

@@ -59,7 +59,6 @@ class AdhocColumn(TypedDict, total=False):
hasCustomLabel: Optional[bool]
label: str
sqlExpression: str
isColumnReference: Optional[bool]
columnType: Optional[Literal["BASE_AXIS", "SERIES"]]
timeGrain: Optional[str]

View File

@@ -20,8 +20,8 @@ msgstr ""
"POT-Creation-Date: 2025-04-29 12:34+0330\n"
"PO-Revision-Date: 2016-05-02 08:49-0700\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: es\n"
"Language-Team: Español; Castellano <>\n"
"Language: en\n"
"Language-Team: en <LL@li.org>\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -91,10 +91,6 @@ msgstr " uno nuevo"
msgid " at line %(line)d"
msgstr " en la línea %(line)d"
#, python-format
msgid " at line %(line)d"
msgstr ""
msgid " expression which needs to adhere to the "
msgstr " expresión que debe adherirse al "
@@ -188,14 +184,7 @@ msgstr "la frecuencia de %(report_type)s programación excede el límite. Config
#, python-format
msgid "%(rows)d rows returned"
msgstr "líneas obtenidas"
#, python-format
msgid ""
"%(subtitle)s\n"
"This may be triggered by:\n"
" %(issue)s"
msgstr ""
msgstr "%(rows)d filas devueltas"
#, fuzzy, python-format
msgid "%(suggestion)s instead of \"%(undefinedParameter)s?\""
@@ -266,7 +255,7 @@ msgstr "%s elementos no se han podido etiquetar porque no tienes permisos de edi
msgid "%s operator(s)"
msgstr "%s operador(es)"
#, python-format
#, fuzzy, python-format
msgid "%s option"
msgid_plural "%s options"
msgstr[0] "%s opción"
@@ -280,7 +269,7 @@ msgstr "%s opción(es)"
msgid "%s recipients"
msgstr "%s destinatarios"
#, python-format
#, fuzzy, python-format
msgid "%s row"
msgid_plural "%s rows"
msgstr[0] "%s fila"
@@ -1076,14 +1065,14 @@ msgstr "La consulta de alerta ha devuelto más de una columna."
#, python-format
msgid "Alert query returned more than one column. %(num_cols)s columns returned"
msgstr "La consulta de alerta devolvió más de una columna. %(num_cols)s columnas devueltas"
msgstr "La consulta de alerta ha devuelto más de una columna. %(num_cols)s columnas devueltas"
msgid "Alert query returned more than one row."
msgstr "La consulta de alerta ha devuelto más de una fila."
#, python-format
msgid "Alert query returned more than one row. %(num_rows)s rows returned"
msgstr "La consulta de alerta devolvió más de una fila. %(num_rows)s filas devueltas"
msgstr "La consulta de alerta ha devuelto más de una fila. %(num_rows)s filas devueltas"
msgid "Alert running"
msgstr "Alerta en ejecución"
@@ -1253,7 +1242,7 @@ msgid "An error occurred while creating %ss: %s"
msgstr "Se ha producido un error al crear %ss: %s"
msgid "An error occurred while creating the copy link."
msgstr "Se produjo un error en la creación %ss: %s"
msgstr "Se ha producido un error al crear el enlace de copia."
msgid "An error occurred while creating the data source"
msgstr "Se ha producido un error al crear la fuente de datos"
@@ -1350,7 +1339,7 @@ msgstr "Se ha producido un error al recuperar los valores del usuario: %s"
#, python-format
msgid "An error occurred while importing %s: %s"
msgstr "Se produjo un error importando %s: %s"
msgstr "Se ha producido un error al importar %s: %s"
msgid "An error occurred while loading dashboard information."
msgstr "Se ha producido un error al cargar la información del panel de control."
@@ -1546,7 +1535,7 @@ msgstr "Filtros aplicados (%s)"
#, python-format
msgid "Applied filters: %s"
msgstr "Filtros aplicados %s"
msgstr "Filtros aplicados: %s"
msgid ""
"Applied rolling window did not return any data. Please make sure the "
@@ -1588,7 +1577,7 @@ msgstr "¿Seguro que quieres eliminar?"
#, python-format
msgid "Are you sure you want to delete %s?"
msgstr "¿Está seguro de que desea eliminar %s?"
msgstr "¿Seguro que quieres eliminar %s?"
#, python-format
msgid "Are you sure you want to delete the selected %s?"
@@ -2249,7 +2238,7 @@ msgstr "Opciones del gráfico"
msgid "Chart Orientation"
msgstr "Orientación del gráfico"
#, python-format
#, fuzzy, python-format
msgid "Chart Owner: %s"
msgid_plural "Chart Owners: %s"
msgstr[0] "Propietario del gráfico: %s"
@@ -2263,15 +2252,15 @@ msgstr "Título del gráfico"
#, python-format
msgid "Chart [%s] has been overwritten"
msgstr "El gráfico [%s] ha sido sobreescrito"
msgstr "El gráfico [%s] se ha sobrescrito"
#, python-format
msgid "Chart [%s] has been saved"
msgstr "El gráfico [%s] ha sido guardado"
msgstr "El gráfico [%s] se ha guardado"
#, python-format
msgid "Chart [%s] was added to dashboard [%s]"
msgstr "El gráfico [%s] ha sido añadido al panel de control [%s]"
msgstr "El gráfico [%s] se ha añadido al panel de control [%s]"
msgid "Chart [{}] has been overwritten"
msgstr "El gráfico [{}] se ha sobrescrito"
@@ -2807,7 +2796,7 @@ msgid "Configuration"
msgstr "Configuración"
msgid "Configure Advanced Time Range "
msgstr "Configuración avanzada de rango de tiempo "
msgstr "Configurar el intervalo de tiempo avanzado "
msgid "Configure Time Range: Current..."
msgstr "Configurar el intervalo de tiempo: actual..."
@@ -2816,7 +2805,7 @@ msgid "Configure Time Range: Last..."
msgstr "Configurar el intervalo de tiempo: último..."
msgid "Configure Time Range: Previous..."
msgstr "Configurar Rango de Tiempo: Anteriores..."
msgstr "Configurar el intervalo de tiempo: anterior..."
msgid "Configure custom time range"
msgstr "Configurar intervalo de tiempo personalizado"
@@ -3825,7 +3814,7 @@ msgid_plural "Deleted %(num)d report schedules"
msgstr[0] "Se ha eliminado%(num)d programación de informe"
msgstr[1] "Se han eliminado%(num)d programaciones de informe"
#, python-format
#, fuzzy, python-format
msgid "Deleted %(num)d rules"
msgid_plural "Deleted %(num)d rules"
msgstr[0] "Se han eliminado%(num)d reglas"
@@ -4123,11 +4112,13 @@ msgstr "El desglose en detalle está deshabilitado para esta base de datos. Camb
msgid "Drill to detail: %s"
msgstr "Desglosar en detalle: %s"
#, fuzzy
msgid "Drop a column here or click"
msgid_plural "Drop columns here or click"
msgstr[0] "Suelta una columna aquí o haz clic"
msgstr[1] "Suelta las columnas aquí o haz clic"
#, fuzzy
msgid "Drop a column/metric here or click"
msgid_plural "Drop columns/metrics here or click"
msgstr[0] "Suelta una columna/métrica aquí o haz clic"
@@ -4194,10 +4185,10 @@ msgid "Duration in ms (1.40008 => 1ms 400µs 80ns)"
msgstr "Duración en ms (1,40008 => 1 ms 400 µs 80 ns)"
msgid "Duration in ms (100.40008 => 100ms 400µs 80ns)"
msgstr "Duración en ms (100.40008 => 100 ms 400 µs 80 ns)"
msgstr "Duración en ms (100,40008 => 100 ms 400 µs 80 ns)"
msgid "Duration in ms (10500 => 0:10.5)"
msgstr "Duración en ms (10500 => 0:10.5)"
msgstr "Duración en ms (10 500 => 0:10,5)"
msgid "Duration in ms (66000 => 1m 6s)"
msgstr "Duración en ms (66 000 => 1 m 6 s)"
@@ -4244,9 +4235,6 @@ msgstr "Editar CSS"
msgid "Edit CSS template properties"
msgstr "Editar propiedades de la plantilla CSS"
msgid "Edit Chart"
msgstr "Editar Gráfico"
msgid "Edit Chart Properties"
msgstr "Editar propiedades del gráfico"
@@ -4381,7 +4369,7 @@ msgid "Embed dashboard"
msgstr "Incrustar panel de control"
msgid "Embedded dashboard could not be deleted."
msgstr "El panel de control no pudo ser eliminado."
msgstr "No se ha podido eliminar el panel de control incrustado."
msgid "Embedding deactivated."
msgstr "Incrustación desactivada."
@@ -4465,7 +4453,7 @@ msgid "End date"
msgstr "Fecha final"
msgid "End date excluded from time range"
msgstr "Fecha final excluida del rango de tiempo"
msgstr "Fecha final excluida del intervalo de tiempo"
msgid "End date must be after start date"
msgstr "La fecha final debe ser posterior a la fecha inicial"
@@ -4584,6 +4572,9 @@ msgstr "Error al leer el archivo de Excel"
msgid "Error saving dataset"
msgstr "Error al guardar el conjunto de datos"
msgid "Error unfaving chart"
msgstr "Error al quitar el gráfico de favoritos"
msgid "Error while adding role!"
msgstr "Error al añadir el rol"
@@ -4593,9 +4584,6 @@ msgstr "Error al añadir el usuario"
msgid "Error while duplicating role!"
msgstr "Error al duplicar el rol"
msgid "Error unfaving chart"
msgstr "Error al quitar el gráfico de favoritos"
msgid "Error while fetching charts"
msgstr "Error al recuperar los gráficos"
@@ -5230,10 +5218,10 @@ msgid "Geometry Column"
msgstr "Columna de geometría"
msgid "Get the last date by the date unit."
msgstr "Obtiene la última fecha para la unidad de fecha especificada."
msgstr "Obtener la última fecha por la unidad de fecha."
msgid "Get the specify date for the holiday"
msgstr "Obtiene la fecha del día feriado especificado"
msgstr "Obtener la fecha especificada para el día festivo"
msgid "Give access to multiple catalogs in a single database connection."
msgstr "Da acceso a múltiples catálogos en una sola conexión de base de datos."
@@ -5508,7 +5496,7 @@ msgstr "Incluye una descripción que se enviará con tu informe"
#, python-format
msgid "Include description to be sent with %s"
msgstr "Incluye una descripción para ser enviada con %s"
msgstr "Incluye una descripción para enviarse con %s"
msgid "Include series name as an axis"
msgstr "Incluir el nombre de la serie como eje"
@@ -5596,7 +5584,7 @@ msgstr "JSON no válido"
#, python-format
msgid "Invalid advanced data type: %(advanced_data_type)s"
msgstr "Tipo de información avanzada inválida: %(advanced_data_type)s"
msgstr "Tipo de datos avanzados no válido: %(advanced_data_type)s"
msgid "Invalid certificate"
msgstr "Certificado no válido"
@@ -5669,7 +5657,7 @@ msgstr "Referencia no válida a la columna: «%(column)s»"
#, python-format
msgid "Invalid result type: %(result_type)s"
msgstr "Tipo de resultado inválido: %(result_type)s"
msgstr "Tipo de resultado no válido: %(result_type)s"
#, python-format
msgid "Invalid rolling_type: %(type)s"
@@ -5677,7 +5665,7 @@ msgstr "Tipo móvil no válido: %(type)s "
#, python-format
msgid "Invalid spatial point encountered: %(latlong)s"
msgstr "Se encontró un punto espacial inválido: %(latlong)s"
msgstr "Se ha encontrado un punto espacial no válido: %(latlong)s"
msgid "Invalid state."
msgstr "Estado no válido."
@@ -6463,7 +6451,7 @@ msgid "Middle"
msgstr "Medio"
msgid "Midnight"
msgstr "Media noche"
msgstr "Medianoche"
msgid "Miles"
msgstr "Millas"
@@ -6574,7 +6562,7 @@ msgstr "Mes"
#, python-format
msgid "Months %s"
msgstr "Meses %s"
msgstr "Meses %s "
msgid "More"
msgstr "Más"
@@ -6589,7 +6577,7 @@ msgid "Move only"
msgstr "Solo mover"
msgid "Moves the given set of dates by a specified interval."
msgstr "Desplaza el conjunto de fechas dado en un intervalo especificado."
msgstr "Mueve el conjunto de fechas en cuestión por un intervalo especificado."
msgid "Multi-Dimensions"
msgstr "Multidimensional"
@@ -6809,7 +6797,7 @@ msgid "No entities have this tag currently assigned"
msgstr "Ninguna entidad tiene esta etiqueta asignada actualmente"
msgid "No filter"
msgstr "Sin filtro"
msgstr "No hay ningún filtro"
msgid "No filter is selected."
msgstr "No se ha seleccionado ningún filtro."
@@ -6833,7 +6821,7 @@ msgid "No records found"
msgstr "No se han encontrado registros"
msgid "No results"
msgstr "Sin resultados"
msgstr "No hay resultados"
msgid "No results found"
msgstr "No se han encontrado resultados"
@@ -6842,7 +6830,7 @@ msgid "No results match your filter criteria"
msgstr "No hay resultados que coincidan con tus criterios de filtro"
msgid "No results were returned for this query"
msgstr "No se obtuvieron resultados para esta consulta"
msgstr "No se han devuelto resultados para esta consulta"
msgid ""
"No results were returned for this query. If you expected results to be "
@@ -7085,7 +7073,7 @@ msgid "One or many metrics to display"
msgstr "Una o varias métricas a mostrar"
msgid "One or more annotation layers failed loading."
msgstr "Una o más capas de anotación fallaron al cargar."
msgstr "No se han podido cargar una o más capas de anotación."
msgid "One or more columns already exist"
msgstr "Una o más columnas ya existen"
@@ -7501,7 +7489,7 @@ msgid "Pie Chart"
msgstr "Gráfico tipo pastel"
msgid "Pie charts on a map"
msgstr "Mapa con gráficos tipo pastel"
msgstr "Gráficos tipo pastel en un mapa"
msgid "Pie shape"
msgstr "Forma de pastel"
@@ -7602,6 +7590,7 @@ msgstr "Vuelve a introducir tu contraseña."
msgid "Please re-export your file and try importing again"
msgstr "Vuelve a exportar tu archivo e intenta importarlo de nuevo"
#, fuzzy
msgid "Please reach out to the Chart Owner for assistance."
msgid_plural "Please reach out to the Chart Owners for assistance."
msgstr[0] "Ponte en contacto con el propietario del gráfico para obtener ayuda."
@@ -8018,7 +8007,7 @@ msgid "Relationships between community channels"
msgstr "Relaciones entre canales comunitarios"
msgid "Relative Date/Time"
msgstr "Fecha/Hora Relativa"
msgstr "Fecha/hora relativa"
msgid "Relative period"
msgstr "Periodo relativo"
@@ -8763,9 +8752,6 @@ msgstr "Selecciona el método de entrega"
msgid "Select Tags"
msgstr "Seleccionar etiquetas"
msgid "Select Viz Type"
msgstr "Selecciona un tipo de visualización"
msgid "Select chart type"
msgstr "Seleccionar tipo de visualización"
@@ -9155,21 +9141,12 @@ msgstr "Mostrar burbujas"
msgid "Show CREATE VIEW statement"
msgstr "Mostrar instrucción CREAR VISTA"
msgid "Show Cell bars"
msgstr "Todos los gráficos"
msgid "Show Chart"
msgstr "Mostrar Gráfico"
msgid "Show Column"
msgstr "Mostrar Columna"
msgid "Show cell bars"
msgstr "Mostrar barras de celda"
msgid "Show Dashboard"
msgstr "Mostrar el panel de control"
msgid "Show Database"
msgstr "Mostrar Base de Datos"
msgid "Show Labels"
msgstr "Mostrar etiquetas"
@@ -9179,9 +9156,6 @@ msgstr "Mostrar registro"
msgid "Show Markers"
msgstr "Mostrar marcadores"
msgid "Show Metric"
msgstr "Mostrar Métrica"
msgid "Show Metric Names"
msgstr "Mostrar nombres de las métricas"
@@ -9449,7 +9423,7 @@ msgstr "Lo sentimos, se ha producido un error. Inténtalo de nuevo más tarde."
#, python-format
msgid "Sorry, there was an error saving this %s: %s"
msgstr "Lo sentimos, se ha producido un error al guardar esto %s: %s"
msgstr "Lo sentimos, se ha producido un error al guardar este %s: %s"
#, python-format
msgid "Sorry, there was an error saving this dashboard: %s"
@@ -9538,7 +9512,7 @@ msgid "Spatial"
msgstr "Espacial"
msgid "Specific Date/Time"
msgstr "Fecha/Hora Específica"
msgstr "Fecha/hora específica"
msgid "Specify name to CREATE TABLE AS schema in: public"
msgstr "Especifica el nombre para el esquema CREAR TABLA COMO en: público"
@@ -9603,7 +9577,7 @@ msgid "Start date"
msgstr "Fecha de inicio"
msgid "Start date included in time range"
msgstr "Fecha inicial incluida en el rango de tiempo"
msgstr "Fecha de inicio incluida en el intervalo de tiempo"
msgid "Start y-axis at 0"
msgstr "Iniciar eje Y en 0"
@@ -9759,9 +9733,6 @@ msgstr "Documentación del SDK integrado de Superset."
msgid "Superset chart"
msgstr "Gráfico Superset"
msgid "Superset dashboard"
msgstr "Dashboard Superset"
msgid "Superset encountered an error while running a command."
msgstr "Superset ha encontrado un error al ejecutar un comando."
@@ -9879,7 +9850,7 @@ msgstr "No se ha definido el nombre de la tabla"
#, python-format
msgid "Table or View \"%(table)s\" does not exist."
msgstr "La tabla o vista \"%(table)s\" no existe"
msgstr "La tabla o la vista «%(table)s» no existen."
msgid ""
"Table that visualizes paired t-tests, which are used to understand "
@@ -10746,9 +10717,7 @@ msgid "There was an error loading the tables"
msgstr "Se ha producido un error al cargar las tablas"
msgid "There was an error retrieving dashboard tabs."
msgstr ""
"Lo sentimos, hubo un error al obtener la información de la base de datos:"
" %s"
msgstr "Se ha producido un error al recuperar las pestañas del panel."
#, python-format
msgid "There was an error saving the favorite status: %s"
@@ -10767,7 +10736,7 @@ msgstr "Ha habido un problema al eliminar %s: %s"
#, python-format
msgid "There was an issue deleting rules: %s"
msgstr "Hubo un problema eliminando las reglas: %s"
msgstr "Ha habido un problema al eliminar las reglas: %s"
#, python-format
msgid "There was an issue deleting the selected %s: %s"
@@ -10826,11 +10795,11 @@ msgstr "Ha habido un problema al recuperar tu gráfico: %s "
#, python-format
msgid "There was an issue fetching your dashboards: %s"
msgstr "Hubo un problema al obtener tus dashboards: %s"
msgstr "Ha habido un problema al recuperar tus paneles de control: %s"
#, python-format
msgid "There was an issue fetching your recent activity: %s"
msgstr "Hubo un error al obtener tu actividad reciente: %s"
msgstr "Ha habido un problema al recuperar tu actividad reciente: %s"
#, python-format
msgid "There was an issue fetching your saved queries: %s"
@@ -10870,7 +10839,7 @@ msgid "This action will permanently delete the template."
msgstr "Esta acción eliminará permanentemente la plantilla."
msgid "This action will permanently delete the user."
msgstr "Esta acción eliminará permanentemente el usuario."
msgstr "Esta acción eliminará permanentemente el usuario."
msgid ""
"This can be either an IP address (e.g. 127.0.0.1) or a domain name (e.g. "
@@ -11079,6 +11048,7 @@ msgstr "Este tipo de visualización no admite el filtro cruzado."
msgid "This visualization type is not supported."
msgstr "Este tipo de visualización no se admite."
#, fuzzy
msgid "This was triggered by:"
msgid_plural "This may be triggered by:"
msgstr[0] "La causa de esto ha sido:"
@@ -11357,33 +11327,7 @@ msgid "Tree layout"
msgstr "Diseño del árbol"
msgid "Tree orientation"
msgstr "Orientación del árbol"
msgid "Treemap"
msgstr "Diagrama de árbol"
@@ -12039,7 +11983,7 @@ msgstr "WMS"
#, python-format
msgid "Waiting on %s"
msgstr "Esperando por %s"
msgstr "Esperando a %s"
msgid "Waiting on database..."
msgstr "Esperando a la base de datos..."
@@ -12139,7 +12083,7 @@ msgstr "Semanas %s"
msgid "Weight"
msgstr "Peso"
#, python-format
#, fuzzy, python-format
msgid ""
"Were having trouble loading these results. Queries are set to timeout "
"after %s second."
@@ -12149,7 +12093,7 @@ msgid_plural ""
msgstr[0] "Estamos teniendo problemas para cargar estos resultados. Se considera que una consulta ha superado el tiempo de espera después de %s segundo."
msgstr[1] "Estamos teniendo problemas para cargar estos resultados. Se considera que una consulta ha superado el tiempo de espera después de %s segundos."
#, python-format
#, fuzzy, python-format
msgid ""
"Were having trouble loading this visualization. Queries are set to "
"timeout after %s second."
@@ -12632,7 +12576,7 @@ msgstr "No puedes utilizar el diseño de marca de 45° con el filtro de interval
#, python-format
msgid "You do not have permission to edit this %s"
msgstr "No tienes permisos para editar esto %s"
msgstr "No tienes permisos para editar este %s"
msgid "You do not have permission to edit this chart"
msgstr "No tienes permisos para editar este gráfico"
@@ -12897,7 +12841,7 @@ msgid "background"
msgstr "fondo"
msgid "Basic conditional formatting"
msgstr "Formato condicional básico"
msgstr "formato condicional básico"
msgid "basis"
msgstr "base"
@@ -13220,18 +13164,13 @@ msgid ""
"is linked to %s charts that appear on %s dashboards and users have %s SQL"
" Lab tabs using this database open. Are you sure you want to continue? "
"Deleting the database will break those objects."
msgstr ""
"La base de datos %s está vinculada a %s gráficos que aparecen en %s "
"dashboards. ¿Estás seguro de que quieres continuar? Eliminar la base de "
"datos dejará inutilizables esos objetos."
msgstr "está vinculado a %s gráficos que aparecen en %s paneles de control y los usuarios tienen %s pestañas de SQL Lab usando esta base de datos abierta. ¿Seguro que quieres continuar? Eliminar la base de datos descompondrá esos objetos."
#, python-format
msgid ""
"is linked to %s charts that appear on %s dashboards. Are you sure you "
"want to continue? Deleting the dataset will break those objects."
msgstr ""
"esta linkeado a %s gráficos que aparecen en %s tableros. ¿Está seguro"
"de que desea continuar? Eliminar el conjunto de datos romperá esos objetos."
msgstr "está vinculado a %s gráficos que aparecen en %s paneles de control. ¿Seguro que quieres continuar? Eliminar el conjunto de datos descompondrá esos objetos."
msgid "is not"
msgstr "no es"
@@ -13371,19 +13310,16 @@ msgid "pixels"
msgstr "píxeles"
msgid "previous calendar month"
msgstr "mes anterior"
msgstr "mes natural anterior"
msgid "previous calendar quarter"
msgstr "trimestre anterior"
msgstr "trimestre natural anterior"
msgid "previous calendar week"
msgstr "semana anterior"
msgstr "semana natural anterior"
msgid "previous calendar year"
msgstr "año anterior"
msgid "published"
msgstr "No publicado"
msgstr "año natural anterior"
msgid "quarter"
msgstr "trimestre"
@@ -13403,9 +13339,6 @@ msgstr "reiniciar"
msgid "recent"
msgstr "reciente"
msgid "recents"
msgstr "Recientes"
msgid "recipients"
msgstr "destinatarios"
@@ -13427,9 +13360,6 @@ msgstr "rowlevelsecurity"
msgid "running"
msgstr "en ejecución"
msgid "saved queries"
msgstr "Consultas Guardadas"
msgid "save"
msgstr "guardar"

View File

@@ -753,11 +753,10 @@ class TestPostChartDataApi(BaseTestChartDataApi):
@with_feature_flags(GLOBAL_ASYNC_QUERIES=True)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
@mock.patch("superset.extensions.event_logger.log")
def test_chart_data_async_cached_sync_response(self, mock_event_logger):
def test_chart_data_async_cached_sync_response(self):
"""
Chart data API: Test chart data query returns results synchronously
when results are already cached, and that is_cached is logged.
when results are already cached.
"""
app._got_first_request = False
async_query_manager_factory.init_app(app)
@@ -768,7 +767,7 @@ class TestPostChartDataApi(BaseTestChartDataApi):
cmd_run_val = {
"query_context": QueryContext(),
"queries": [{"query": "select * from foo", "is_cached": True}],
"queries": [{"query": "select * from foo"}],
}
with mock.patch.object(
@@ -781,16 +780,7 @@ class TestPostChartDataApi(BaseTestChartDataApi):
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
patched_run.assert_called_once_with(force_cached=True)
assert data == {
"result": [{"query": "select * from foo", "is_cached": True}]
}
# Verify that is_cached was logged to event logger
call_kwargs = mock_event_logger.call_args[1]
records = call_kwargs.get("records", [])
assert len(records) > 0
# is_cached should be True when retrieved from cache in async path
assert records[0]["is_cached"] is True
assert data == {"result": [{"query": "select * from foo"}]}
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
@mock.patch("superset.extensions.event_logger.log")

View File

@@ -126,41 +126,6 @@ class TestSqlLab(SupersetTestCase):
"engine_name": engine_name,
}
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_sql_json_where_clause_content_type(self):
"""
Test that queries with WHERE clauses return proper Content-Type headers.
This test addresses issue #36072 where Flask 2.3+ content negotiation
could cause HTTP 406 errors for queries with WHERE clauses, particularly
when using ENABLE_PROXY_FIX or certain Accept header configurations.
"""
self.login(ADMIN_USERNAME)
# Test query with WHERE clause
resp = self.client.post(
"/api/v1/sqllab/execute/",
json={
"database_id": self.get_database_by_name("examples").id,
"sql": "SELECT * FROM birth_names WHERE name = 'John' LIMIT 5",
"client_id": "test_where_1",
},
)
# Verify response is successful
assert resp.status_code in (200, 202), f"Expected 200/202, got {resp.status_code}"
# Verify Content-Type header is explicitly set to prevent 406 errors
assert "application/json" in resp.headers.get("Content-Type", "")
# Verify response body is valid JSON
data = resp.json
assert isinstance(data, dict)
# If query ran synchronously (200), verify it has data
if resp.status_code == 200:
assert "data" in data or "query_id" in data
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_sql_json_dml_disallowed(self):
self.login(ADMIN_USERNAME)

View File

@@ -0,0 +1,365 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Tests for SQL expression processing during QueryObject validation.
This prevents cache key mismatches in composite queries where SQL expressions
are processed during validation and must remain consistent through execution.
"""
from typing import Any
from unittest.mock import Mock
from superset.common.query_object import QueryObject
from superset.connectors.sqla.models import SqlaTable
def test_sql_expressions_processed_during_validation():
    """
    Verify that ``QueryObject.validate()`` processes adhoc SQL expressions.

    Regression test: a metric authored as ``sum(field)`` (lowercase) and
    shared between ``metrics`` and ``orderby`` used to be uppercased only
    during SQL generation. That late mutation made the cache key disagree
    with the executed query in composite requests. Processing must happen
    once, during validate(), so that:

    - the cache key is computed from the processed expressions, and
    - query execution sees those same processed expressions.
    """
    # Adhoc metric exactly as a user would author it (lowercase SQL).
    metric = {
        "expressionType": "SQL",
        "sqlExpression": "sum(num)",  # lowercase - will be uppercased
        "label": "Sum of Num",
    }

    # Datasource stub exposing only what validation needs.
    datasource = Mock(spec=SqlaTable)
    datasource.database_id = 1
    datasource.schema = "public"

    # Stand-in for sanitize_clause: simply uppercase the expression.
    uppercase = Mock(side_effect=lambda expression, **kwargs: expression.upper())
    datasource._process_select_expression = uppercase
    datasource._process_orderby_expression = uppercase

    # Same adhoc metric appears in both metrics and orderby.
    qobj = QueryObject(
        datasource=datasource,
        metrics=[metric],
        orderby=[(metric, True)],
        columns=[],
        extras={},
    )

    qobj.validate()

    # Both the metric and its orderby twin must carry the processed SQL.
    assert qobj.metrics[0]["sqlExpression"] == "SUM(NUM)", (
        "Validation should process metric SQL expressions"
    )
    assert qobj.orderby[0][0]["sqlExpression"] == "SUM(NUM)", (
        "Validation should process orderby SQL expressions"
    )
def test_validation_does_not_mutate_original_dicts():
    """
    Ensure validate() copies adhoc metric dicts rather than mutating them.

    Adhoc metrics may be shared by reference across the queries of a
    composite request; mutating them in place would leak processed SQL into
    sibling queries as an unexpected side effect.
    """
    source_metric = {
        "expressionType": "SQL",
        "sqlExpression": "sum(sales)",
        "label": "Total Sales",
    }
    # Remember the pre-validation SQL so we can prove no mutation happened.
    sql_before = source_metric["sqlExpression"]

    datasource = Mock(spec=SqlaTable)
    datasource.database_id = 1
    datasource.schema = "public"
    uppercase = Mock(side_effect=lambda expression, **kwargs: expression.upper())
    datasource._process_select_expression = uppercase
    datasource._process_orderby_expression = uppercase

    qobj = QueryObject(
        datasource=datasource,
        metrics=[source_metric],
        orderby=[(source_metric, True)],
        columns=[],
        extras={},
    )
    qobj.validate()

    # The caller's dict is untouched...
    assert source_metric["sqlExpression"] == sql_before, (
        "Original metric dict should not be mutated during validation"
    )
    # ...while the QueryObject holds processed copies in fresh dicts.
    assert qobj.metrics[0]["sqlExpression"] == "SUM(SALES)"
    assert qobj.orderby[0][0]["sqlExpression"] == "SUM(SALES)"
def test_validation_with_multiple_adhoc_metrics():
    """
    Validation handles several adhoc metrics across metrics and orderby.
    """
    sales_metric = {
        "expressionType": "SQL",
        "sqlExpression": "sum(sales)",
        "label": "Total Sales",
    }
    price_metric = {
        "expressionType": "SQL",
        "sqlExpression": "avg(price)",
        "label": "Average Price",
    }

    datasource = Mock(spec=SqlaTable)
    datasource.database_id = 1
    datasource.schema = "public"
    uppercase = Mock(side_effect=lambda expression, **kwargs: expression.upper())
    datasource._process_select_expression = uppercase
    datasource._process_orderby_expression = uppercase

    # Both metrics appear in metrics and (with mixed sort order) in orderby.
    qobj = QueryObject(
        datasource=datasource,
        metrics=[sales_metric, price_metric],
        orderby=[(sales_metric, False), (price_metric, True)],
        columns=[],
        extras={},
    )
    qobj.validate()

    # Originals stay untouched.
    assert sales_metric["sqlExpression"] == "sum(sales)"
    assert price_metric["sqlExpression"] == "avg(price)"
    # QueryObject carries processed copies, in order, in both collections.
    assert qobj.metrics[0]["sqlExpression"] == "SUM(SALES)"
    assert qobj.metrics[1]["sqlExpression"] == "AVG(PRICE)"
    assert qobj.orderby[0][0]["sqlExpression"] == "SUM(SALES)"
    assert qobj.orderby[1][0]["sqlExpression"] == "AVG(PRICE)"
def test_validation_preserves_jinja_templates():
    """
    Jinja templates must survive validation untouched.

    Template rendering happens at execution time with a template_processor;
    validation runs with template_processor=None and must only sanitize the
    SQL around the ``{{ ... }}`` markers, never render them.
    """
    templated_metric = {
        "expressionType": "SQL",
        "sqlExpression": "sum({{ column_name }})",
        "label": "Dynamic Sum",
    }

    datasource = Mock(spec=SqlaTable)
    datasource.database_id = 1
    datasource.schema = "public"

    def sanitize(expression: str, **kwargs: Any) -> str:
        # Validation-time processing: uppercase only, no Jinja rendering
        # (template_processor is None at this stage).
        return expression.upper()

    datasource._process_select_expression = Mock(side_effect=sanitize)
    datasource._process_orderby_expression = Mock(side_effect=sanitize)

    qobj = QueryObject(
        datasource=datasource,
        metrics=[templated_metric],
        orderby=[(templated_metric, True)],
        columns=[],
        extras={},
    )
    qobj.validate()

    # The template markers must still be present after validation.
    processed_sql = qobj.metrics[0]["sqlExpression"]
    assert "{{" in processed_sql
    assert "}}" in processed_sql
def test_validation_serialization_stability():
    """
    Serializing metrics/orderby after validation must be deterministic.

    This simulates cache key computation, which serializes the QueryObject
    to JSON. Because validation produces new dicts holding the processed
    SQL, repeated serialization of the same QueryObject must yield
    byte-identical output.
    """
    from superset.utils import json

    metric = {
        "expressionType": "SQL",
        "sqlExpression": "sum(num)",
        "label": "Sum",
    }

    datasource = Mock(spec=SqlaTable)
    datasource.database_id = 1
    datasource.schema = "public"
    uppercase = Mock(side_effect=lambda expression, **kwargs: expression.upper())
    datasource._process_select_expression = uppercase
    datasource._process_orderby_expression = uppercase

    qobj = QueryObject(
        datasource=datasource,
        metrics=[metric],
        orderby=[(metric, True)],
        columns=[],
        extras={},
    )
    qobj.validate()

    def snapshot():
        # Mirrors how cache-key computation serializes metrics and orderby.
        return (
            json.dumps(qobj.metrics, sort_keys=True),
            json.dumps([(col, asc) for col, asc in qobj.orderby], sort_keys=True),
        )

    metrics_first, orderby_first = snapshot()
    metrics_second, orderby_second = snapshot()

    assert metrics_first == metrics_second, "Metrics serialization should be stable"
    assert orderby_first == orderby_second, "Orderby serialization should be stable"
    # The processed (uppercased) SQL must appear in the serialized form.
    assert "SUM(NUM)" in metrics_first
    assert "SUM(NUM)" in orderby_first
def test_orderby_uses_processed_true():
    """
    Test that adhoc metrics in orderby are converted with processed=True.

    Regression guard for PR #35342's adhoc orderby fix. The issue: orderby
    expressions are processed during validation with ORDER BY wrapping; if
    they are re-processed during execution with SELECT wrapping, parsing
    breaks. The fix passes processed=True to adhoc_metric_to_sqla() for
    orderby items so re-processing is skipped.

    NOTE(review): this test drives a tracking stand-in for
    adhoc_metric_to_sqla directly rather than invoking get_sqla_query(), so
    it documents the expected call contract but does not exercise the
    production code path. The previous version also patched
    ExploreMixin.adhoc_metric_to_sqla and built a mock query object, but
    neither was ever used by the assertions; that dead code has been
    removed. Consider an integration test through
    ExploreMixin.get_sqla_query for end-to-end coverage.
    """
    from unittest.mock import MagicMock

    # An adhoc metric as it would appear in an orderby tuple.
    adhoc_metric = {
        "expressionType": "SQL",
        "sqlExpression": "COUNT(*)",
        "label": "count_metric",
    }

    # Minimal datasource stand-in for the `self` positional argument.
    mock_datasource = MagicMock()
    mock_datasource.database_id = 1
    mock_datasource.database.backend = "postgresql"
    mock_datasource.schema = "public"

    # Record every call so we can assert on the kwargs used.
    calls_log = []

    def tracked_adhoc_metric_to_sqla(self, metric, columns_by_name, **kwargs):
        calls_log.append(
            {
                "metric": metric,
                "processed": kwargs.get("processed", False),
                "has_template_processor": "template_processor" in kwargs,
            }
        )
        # Return a mock column element, as the real method would.
        from sqlalchemy import literal_column

        return literal_column("mock_col")

    # Simulate the orderby branch of get_sqla_query (helpers.py ~line 1868):
    # adhoc metrics found in orderby must be converted with processed=True.
    from superset.utils import core as utils

    if isinstance(adhoc_metric, dict) and utils.is_adhoc_metric(adhoc_metric):
        tracked_adhoc_metric_to_sqla(
            mock_datasource,
            adhoc_metric,
            {},
            processed=True,  # This is the fix!
        )

    # Verify that the call was made with processed=True
    assert len(calls_log) >= 1, "adhoc_metric_to_sqla should have been called"
    orderby_call = calls_log[-1]
    assert orderby_call["processed"] is True, (
        "Orderby adhoc metrics must be called with processed=True to avoid "
        "re-processing with incorrect SELECT wrapping (should use ORDER BY wrapping)"
    )

View File

@@ -29,8 +29,6 @@ from sqlalchemy import create_engine
from sqlalchemy.orm.session import Session
from sqlalchemy.pool import StaticPool
from superset.superset_typing import AdhocColumn
if TYPE_CHECKING:
from superset.models.core import Database
@@ -1127,34 +1125,3 @@ def test_process_select_expression_end_to_end(database: Database) -> None:
assert expected.replace(" ", "").lower() in result.replace(" ", "").lower(), (
f"Expected '{expected}' to be in result '{result}' for input '{expression}'"
)
def test_adhoc_column_to_sqla_with_column_reference(database: Database) -> None:
"""
Test that adhoc_column_to_sqla
properly quotes column identifiers when isColumnReference is true.
This tests the fix for column names with spaces being properly quoted
before being processed by SQLGlot to prevent "column AS alias" misinterpretation.
"""
from superset.connectors.sqla.models import SqlaTable
table = SqlaTable(
table_name="test_table",
database=database,
)
# Test 1: Column reference with spaces should be quoted
col_with_spaces: AdhocColumn = {
"sqlExpression": "Customer Name",
"label": "Customer Name",
"isColumnReference": True,
}
result = table.adhoc_column_to_sqla(col_with_spaces)
# Should contain the quoted column name
assert result is not None
result_str = str(result)
assert '"Customer Name"' in result_str