Moving away from using the root logger everywhere (#9099)

* Moving away from using the root logger everywhere

* self.logger -> logger
This commit is contained in:
Craig Rueda
2020-02-07 23:38:48 -08:00
committed by GitHub
parent 3cbe228dc1
commit 607cfd1f29
31 changed files with 213 additions and 169 deletions

View File

@@ -65,6 +65,7 @@ except ImportError:
logging.getLogger("MARKDOWN").setLevel(logging.INFO)
logger = logging.getLogger(__name__)
DTTM_ALIAS = "__timestamp"
ADHOC_METRIC_EXPRESSION_TYPES = {"SIMPLE": "SIMPLE", "SQL": "SQL"}
@@ -99,9 +100,9 @@ def flasher(msg, severity=None):
flash(msg, severity)
except RuntimeError:
if severity == "danger":
logging.error(msg)
logger.error(msg)
else:
logging.info(msg)
logger.info(msg)
class _memoized:
@@ -242,7 +243,7 @@ def parse_human_datetime(s):
parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
dttm = dttm_from_timetuple(parsed_dttm.utctimetuple())
except Exception as e:
logging.exception(e)
logger.exception(e)
raise ValueError("Couldn't parse date string [{}]".format(s))
return dttm
@@ -544,7 +545,7 @@ def validate_json(obj):
try:
json.loads(obj)
except Exception as e:
logging.error(f"JSON is not valid {e}")
logger.error(f"JSON is not valid {e}")
raise SupersetException("JSON is not valid")
@@ -568,7 +569,7 @@ class timeout:
self.error_message = error_message
def handle_timeout(self, signum, frame):
logging.error("Process timed out")
logger.error("Process timed out")
raise SupersetTimeoutException(self.error_message)
def __enter__(self):
@@ -576,15 +577,15 @@ class timeout:
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
except ValueError as e:
logging.warning("timeout can't be used in the current context")
logging.exception(e)
logger.warning("timeout can't be used in the current context")
logger.exception(e)
def __exit__(self, type, value, traceback):
try:
signal.alarm(0)
except ValueError as e:
logging.warning("timeout can't be used in the current context")
logging.exception(e)
logger.warning("timeout can't be used in the current context")
logger.exception(e)
def pessimistic_connection_handling(some_engine):
@@ -640,7 +641,7 @@ def notify_user_about_perm_udate(granter, user, role, datasource, tpl_name, conf
msg = render_template(
tpl_name, granter=granter, user=user, role=role, datasource=datasource
)
logging.info(msg)
logger.info(msg)
subject = __(
"[Superset] Access to the datasource %(name)s was granted",
name=datasource.full_name,
@@ -746,12 +747,12 @@ def send_MIME_email(e_from, e_to, mime_msg, config, dryrun=False):
s.starttls()
if SMTP_USER and SMTP_PASSWORD:
s.login(SMTP_USER, SMTP_PASSWORD)
logging.info("Sent an email to " + str(e_to))
logger.info("Sent an email to " + str(e_to))
s.sendmail(e_from, e_to, mime_msg.as_string())
s.quit()
else:
logging.info("Dryrun enabled, email notification content is below:")
logging.info(mime_msg.as_string())
logger.info("Dryrun enabled, email notification content is below:")
logger.info(mime_msg.as_string())
def get_email_address_list(address_string: str) -> List[str]:
@@ -924,7 +925,7 @@ def get_or_create_db(database_name, sqlalchemy_uri, *args, **kwargs):
db.session.query(models.Database).filter_by(database_name=database_name).first()
)
if not database:
logging.info(f"Creating database reference for {database_name}")
logger.info(f"Creating database reference for {database_name}")
database = models.Database(database_name=database_name, *args, **kwargs)
db.session.add(database)

View File

@@ -24,6 +24,8 @@ from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
logger = logging.getLogger(__name__)
def decode_dashboards(o):
"""
@@ -64,7 +66,7 @@ def import_dashboards(session, data_stream, import_time=None):
def export_dashboards(session):
"""Returns all dashboards metadata as a json dump"""
logging.info("Starting export")
logger.info("Starting export")
dashboards = session.query(Dashboard)
dashboard_ids = []
for dashboard in dashboards:

View File

@@ -28,6 +28,7 @@ from superset.utils.dates import now_as_float
# resource? Flask-Caching will cache forever, but for the HTTP header we need
# to specify a "far future" date.
FAR_FUTURE = 365 * 24 * 60 * 60 # 1 year in seconds
logger = logging.getLogger(__name__)
@contextmanager
@@ -81,7 +82,7 @@ def etag_cache(max_age, check_perms=bool):
except Exception: # pylint: disable=broad-except
if app.debug:
raise
logging.exception("Exception possibly due to cache backend.")
logger.exception("Exception possibly due to cache backend.")
# if no response was cached, compute it using the wrapped function
if response is None:
@@ -103,7 +104,7 @@ def etag_cache(max_age, check_perms=bool):
except Exception: # pylint: disable=broad-except
if app.debug:
raise
logging.exception("Exception possibly due to cache backend.")
logger.exception("Exception possibly due to cache backend.")
return response.make_conditional(request)

View File

@@ -22,6 +22,7 @@ from superset.models.core import Database
DATABASES_KEY = "databases"
DRUID_CLUSTERS_KEY = "druid_clusters"
logger = logging.getLogger(__name__)
def export_schema_to_dict(back_references):
@@ -42,7 +43,7 @@ def export_schema_to_dict(back_references):
def export_to_dict(session, recursive, back_references, include_defaults):
"""Exports databases and druid clusters to a dictionary"""
logging.info("Starting export")
logger.info("Starting export")
dbs = session.query(Database)
databases = [
database.export_to_dict(
@@ -52,7 +53,7 @@ def export_to_dict(session, recursive, back_references, include_defaults):
)
for database in dbs
]
logging.info("Exported %d %s", len(databases), DATABASES_KEY)
logger.info("Exported %d %s", len(databases), DATABASES_KEY)
cls = session.query(DruidCluster)
clusters = [
cluster.export_to_dict(
@@ -62,7 +63,7 @@ def export_to_dict(session, recursive, back_references, include_defaults):
)
for cluster in cls
]
logging.info("Exported %d %s", len(clusters), DRUID_CLUSTERS_KEY)
logger.info("Exported %d %s", len(clusters), DRUID_CLUSTERS_KEY)
data = dict()
if databases:
data[DATABASES_KEY] = databases
@@ -74,15 +75,15 @@ def export_to_dict(session, recursive, back_references, include_defaults):
def import_from_dict(session, data, sync=[]):
"""Imports databases and druid clusters from dictionary"""
if isinstance(data, dict):
logging.info("Importing %d %s", len(data.get(DATABASES_KEY, [])), DATABASES_KEY)
logger.info("Importing %d %s", len(data.get(DATABASES_KEY, [])), DATABASES_KEY)
for database in data.get(DATABASES_KEY, []):
Database.import_from_dict(session, database, sync=sync)
logging.info(
logger.info(
"Importing %d %s", len(data.get(DRUID_CLUSTERS_KEY, [])), DRUID_CLUSTERS_KEY
)
for datasource in data.get(DRUID_CLUSTERS_KEY, []):
DruidCluster.import_from_dict(session, datasource, sync=sync)
session.commit()
else:
logging.info("Supplied object is not a dictionary.")
logger.info("Supplied object is not a dictionary.")

View File

@@ -18,6 +18,8 @@ import logging
from sqlalchemy.orm.session import make_transient
logger = logging.getLogger(__name__)
def import_datasource(
session, i_datasource, lookup_database, lookup_datasource, import_time
@@ -29,7 +31,7 @@ def import_datasource(
superset instances. Audit metadata isn't copied over.
"""
make_transient(i_datasource)
logging.info("Started import of the datasource: %s", i_datasource.to_json())
logger.info("Started import of the datasource: %s", i_datasource.to_json())
i_datasource.id = None
i_datasource.database_id = lookup_database(i_datasource).id
@@ -49,7 +51,7 @@ def import_datasource(
for metric in i_datasource.metrics:
new_m = metric.copy()
new_m.table_id = datasource.id
logging.info(
logger.info(
"Importing metric %s from the datasource: %s",
new_m.to_json(),
i_datasource.full_name,
@@ -61,7 +63,7 @@ def import_datasource(
for column in i_datasource.columns:
new_c = column.copy()
new_c.table_id = datasource.id
logging.info(
logger.info(
"Importing column %s from the datasource: %s",
new_c.to_json(),
i_datasource.full_name,

View File

@@ -21,6 +21,8 @@ from logging.handlers import TimedRotatingFileHandler
import flask.app
import flask.config
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class LoggingConfigurator(abc.ABC):
@@ -64,4 +66,4 @@ class DefaultLoggingConfigurator(LoggingConfigurator):
)
logging.getLogger().addHandler(handler)
logging.info("logging was configured successfully")
logger.info("logging was configured successfully")