Mirror of https://github.com/apache/superset.git
fix: Add dataset ID to file name on exports (#34782)
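Only the tests for this change appear below; the production code that builds the export file names is not part of this page. As a rough sketch of the behaviour being tested (the helper name, signature, and IDs here are hypothetical, not Superset's actual implementation), the path of each dataset's YAML file inside the export bundle now includes the dataset's numeric ID alongside the database and table names, so two datasets that share a table name cannot overwrite each other in the ZIP:

    def dataset_export_filename(database_name: str, table_name: str, dataset_id: int) -> str:
        """Hypothetical helper: build the in-bundle path for a dataset's YAML file.

        Appending the dataset ID keeps datasets that share a table name
        (e.g. "test_dataset" on two different database connections) from
        colliding inside the exported ZIP.
        """
        return f"datasets/{database_name}/{table_name}_{dataset_id}.yaml"

    # Two same-named datasets on different connections map to distinct files,
    # mirroring what the new API test below asserts.
    print(dataset_export_filename("test_db_connection_1", "test_dataset", 1))
    print(dataset_export_filename("test_db_connection_2", "test_dataset", 2))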
@@ -2390,6 +2390,63 @@ class TestDatasetApi(SupersetTestCase):
         # gamma users by default do not have access to this dataset
         assert rv.status_code in (403, 404)
 
+    def test_export_dataset_bundle_with_id_in_filename(self):
+        """
+        Dataset API: Test that exported dataset filenames include the dataset ID
+        to prevent filename collisions when datasets have identical names.
+        """
+        # Test fails for SQLite because of same table name
+        if backend() == "sqlite":
+            return
+
+        first_connection = self.insert_database("test_db_connection_1")
+        second_connection = self.insert_database("test_db_connection_2")
+        first_dataset = self.insert_dataset(
+            table_name="test_dataset",
+            owners=[],
+            database=first_connection,
+            fetch_metadata=False,
+        )
+        second_dataset = self.insert_dataset(
+            table_name="test_dataset",
+            owners=[],
+            database=second_connection,
+            fetch_metadata=False,
+        )
+
+        self.items_to_delete = [
+            first_dataset,
+            second_dataset,
+            first_connection,
+            second_connection,
+        ]
+
+        self.login(ADMIN_USERNAME)
+        argument = [first_dataset.id, second_dataset.id]
+        uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
+        rv = self.get_assert_metric(uri, "export")
+
+        assert rv.status_code == 200
+
+        buf = BytesIO(rv.data)
+        assert is_zipfile(buf)
+
+        with ZipFile(buf, "r") as zip_file:
+            filenames = zip_file.namelist()
+
+        assert any(
+            filename.endswith(
+                f"datasets/test_db_connection_1/test_dataset_{first_dataset.id}.yaml"
+            )
+            for filename in filenames
+        )
+        assert any(
+            filename.endswith(
+                f"datasets/test_db_connection_2/test_dataset_{second_dataset.id}.yaml"
+            )
+            for filename in filenames
+        )
+
     @unittest.skip("Number of related objects depend on DB")
     @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
     def test_get_dataset_related_objects(self):
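For reference, the export endpoint exercised by the new test receives its dataset IDs as a rison-encoded list in the q query parameter; prison is the rison library the tests already import. A minimal, standalone illustration (the IDs are made up):

    import prison

    # rison-encodes a list of dataset IDs; [1, 2] serialises to roughly "!(1,2)"
    argument = [1, 2]
    uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
    print(uri)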
@@ -78,11 +78,13 @@ class TestExportDatasetsCommand(SupersetTestCase):
 
         assert list(contents.keys()) == [
             "metadata.yaml",
-            "datasets/examples/energy_usage.yaml",
+            f"datasets/examples/energy_usage_{example_dataset.id}.yaml",
             "databases/examples.yaml",
         ]
 
-        metadata = yaml.safe_load(contents["datasets/examples/energy_usage.yaml"]())
+        metadata = yaml.safe_load(
+            contents[f"datasets/examples/energy_usage_{example_dataset.id}.yaml"]()
+        )
 
         # sort columns for deterministic comparison
         metadata["columns"] = sorted(metadata["columns"], key=itemgetter("column_name"))
@@ -218,7 +220,9 @@ class TestExportDatasetsCommand(SupersetTestCase):
         command = ExportDatasetsCommand([example_dataset.id])
         contents = dict(command.run())
 
-        metadata = yaml.safe_load(contents["datasets/examples/energy_usage.yaml"]())
+        metadata = yaml.safe_load(
+            contents[f"datasets/examples/energy_usage_{example_dataset.id}.yaml"]()
+        )
         assert list(metadata.keys()) == [
             "table_name",
             "main_dttm_col",
@@ -261,7 +265,7 @@ class TestExportDatasetsCommand(SupersetTestCase):
 
         assert list(contents.keys()) == [
             "metadata.yaml",
-            "datasets/examples/energy_usage.yaml",
+            f"datasets/examples/energy_usage_{example_dataset.id}.yaml",
         ]
 
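A note on the contents[...]() call sites changed above: as dict(command.run()) in the test suggests, ExportDatasetsCommand.run() yields pairs of a bundle path and a zero-argument callable that produces that file's YAML text, so each entry must be invoked before it is parsed. A minimal sketch under that assumption (the path suffix and the sample YAML are made up):

    import yaml

    # contents maps bundle paths to callables returning YAML text, which is
    # why the tests call contents[...]() before passing it to yaml.safe_load().
    contents = {
        "datasets/examples/energy_usage_1.yaml": lambda: "table_name: energy_usage\nmain_dttm_col: ds\n",
    }

    metadata = yaml.safe_load(contents["datasets/examples/energy_usage_1.yaml"]())
    assert metadata["table_name"] == "energy_usage"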