fix: add timegrains to data payload (#20938)

* add timegrains to data payload

* fix

* oops

* save

* integrate type casting for engines

* add perm object

* change how we raise_for_access

* fix orderby on column types

* linting
This commit is contained in:
Hugh A. Miles II
2022-08-04 13:26:49 -04:00
committed by GitHub
parent eb5369f2a6
commit 6e5036d87f
3 changed files with 103 additions and 2 deletions

View File

@@ -46,6 +46,7 @@ from superset.models.helpers import QueryResult
from superset.utils import csv
from superset.utils.cache import generate_cache_key, set_and_log_cache
from superset.utils.core import (
DatasourceType,
DTTM_ALIAS,
error_msg_from_exception,
get_column_names_from_columns,
@@ -512,4 +513,8 @@ class QueryContextProcessor:
"""
for query in self._query_context.queries:
query.validate()
security_manager.raise_for_access(query_context=self._query_context)
if self._qc_datasource.type == DatasourceType.QUERY:
security_manager.raise_for_access(query=self._qc_datasource)
else:
security_manager.raise_for_access(query_context=self._query_context)

View File

@@ -1201,6 +1201,47 @@ class ExploreMixin: # pylint: disable=too-many-public-methods
return or_(*groups)
def dttm_sql_literal(self, dttm: sa.DateTime, col_type: Optional[str]) -> str:
"""Convert datetime object to a SQL expression string"""
sql = (
self.db_engine_spec.convert_dttm(col_type, dttm, db_extra=None)
if col_type
else None
)
if sql:
return sql
return f'{dttm.strftime("%Y-%m-%d %H:%M:%S.%f")}'
def get_time_filter(
    self,
    time_col: Dict[str, Any],
    start_dttm: sa.DateTime,
    end_dttm: sa.DateTime,
) -> ColumnElement:
    """Build a SQLA filter constraining ``time_col`` to a time range.

    Produces ``col >= start AND col < end`` (half-open interval), with
    each bound rendered through the engine spec so the literal matches
    the column's native type. Either bound may be falsy, in which case
    that side of the range is omitted.

    :param time_col: column description dict; ``column_name`` and
        ``type`` keys are read
    :param start_dttm: inclusive lower bound, or falsy to skip
    :param end_dttm: exclusive upper bound, or falsy to skip
    :returns: an AND-ed SQLA column element of the active constraints
    """
    label = "__time"
    col = time_col.get("column_name")
    sqla_col = literal_column(col)
    my_col = self.make_sqla_column_compatible(sqla_col, label)
    # Renamed from `l`: single-letter `l` is ambiguous (PEP 8 / E741).
    time_filters = []
    if start_dttm:
        time_filters.append(
            my_col
            >= self.db_engine_spec.get_text_clause(
                self.dttm_sql_literal(start_dttm, time_col.get("type"))
            )
        )
    if end_dttm:
        time_filters.append(
            my_col
            < self.db_engine_spec.get_text_clause(
                self.dttm_sql_literal(end_dttm, time_col.get("type"))
            )
        )
    return and_(*time_filters)
def values_for_column(self, column_name: str, limit: int = 10000) -> List[Any]:
"""Runs query against sqla to retrieve some
sample values for the given column.
@@ -1257,6 +1298,12 @@ class ExploreMixin: # pylint: disable=too-many-public-methods
time_expr = self.db_engine_spec.get_timestamp_expr(col, None, time_grain)
return self.make_sqla_column_compatible(time_expr, label)
def get_sqla_col(self, col: Dict[str, Any]) -> Column:
    """Turn a column description dict into a labeled SQLA column.

    Reads ``column_name`` and ``type`` from *col*, builds a typed
    ``sa.column`` and runs it through ``make_sqla_column_compatible``
    so the label is safe for the target engine.
    """
    label = col.get("column_name")
    col_type = col.get("type")
    # Use a distinct name for the SQLA object instead of rebinding the
    # incoming dict parameter.
    sqla_column = sa.column(label, type_=col_type)
    return self.make_sqla_column_compatible(sqla_column, label)
def get_sqla_query( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
self,
apply_fetch_values_predicate: bool = False,
@@ -1393,7 +1440,11 @@ class ExploreMixin: # pylint: disable=too-many-public-methods
col = metrics_exprs_by_expr.get(str(col), col)
need_groupby = True
elif col in columns_by_name:
col = columns_by_name[col].get_sqla_col()
gb_column_obj = columns_by_name[col]
if isinstance(gb_column_obj, dict):
col = self.get_sqla_col(gb_column_obj)
else:
col = gb_column_obj.get_sqla_col()
elif col in metrics_exprs_by_label:
col = metrics_exprs_by_label[col]
need_groupby = True
@@ -1490,6 +1541,33 @@ class ExploreMixin: # pylint: disable=too-many-public-methods
select_exprs.insert(0, timestamp)
groupby_all_columns[timestamp.name] = timestamp
# Use main dttm column to support index with secondary dttm columns.
if (
db_engine_spec.time_secondary_columns
and self.main_dttm_col in self.dttm_cols
and self.main_dttm_col != dttm_col.column_name
):
if isinstance(self.main_dttm_col, dict):
time_filters.append(
self.get_time_filter(
self.main_dttm_col,
from_dttm,
to_dttm,
)
)
else:
time_filters.append(
columns_by_name[self.main_dttm_col].get_time_filter(
from_dttm,
to_dttm,
)
)
if isinstance(dttm_col, dict):
time_filters.append(self.get_time_filter(dttm_col, from_dttm, to_dttm))
else:
time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))
# Always remove duplicates by column name, as sometimes `metrics_exprs`
# can have the same name as a groupby column (e.g. when users use
# raw columns as custom SQL adhoc metric).

View File

@@ -218,7 +218,20 @@ class Query(
@property
def data(self) -> Dict[str, Any]:
order_by_choices = []
for col in self.columns:
column_name = str(col.get("column_name") or "")
order_by_choices.append(
(json.dumps([column_name, True]), column_name + " [asc]")
)
order_by_choices.append(
(json.dumps([column_name, False]), column_name + " [desc]")
)
return {
"time_grain_sqla": [
(g.duration, g.name) for g in self.database.grains() or []
],
"filter_select": True,
"name": self.tab_name,
"columns": self.columns,
@@ -228,6 +241,7 @@ class Query(
"sql": self.sql,
"owners": self.owners_data,
"database": {"id": self.database_id, "backend": self.database.backend},
"order_by_choices": order_by_choices,
}
def raise_for_access(self) -> None:
@@ -282,6 +296,10 @@ class Query(
def schema_perm(self) -> str:
    """Return the schema permission name: ``<database_name>.<schema>``."""
    return "{}.{}".format(self.database.database_name, self.schema)
@property
def perm(self) -> str:
    """Return the query permission string.

    Format: ``[<database_name>].[<tab_name>](id:<id>)``, matching the
    bracketed perm convention used elsewhere in the codebase.
    """
    return "[{}].[{}](id:{})".format(
        self.database.database_name, self.tab_name, self.id
    )
@property
def default_endpoint(self) -> str:
    """Return the default endpoint for a query; always empty."""
    endpoint = ""
    return endpoint