Skip to content

Commit

Permalink
Simplificando
Browse files Browse the repository at this point in the history
  • Loading branch information
avdata99 committed Jan 13, 2025
1 parent ce5e5f2 commit 0b2db1d
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 49 deletions.
27 changes: 2 additions & 25 deletions ckanext/superset/blueprints/superset.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,34 +176,11 @@ def list_datasets():
""" List all datasets created from Superset charts """
cfg = get_config()
sc = SupersetCKAN(**cfg)
sc.load_datasets()

# Extraer los IDs de datasets directamente desde sc.datasets
dataset_ids = [dataset.get('id') for dataset in sc.datasets if dataset.get('id')]
raw_datasets = sc.get_list_datasets(dataset_ids)

# Procesar los datos para aplanarlos
datasets = []
for d in raw_datasets:
if d is not None and isinstance(d, dict):
log.debug(f"Procesando dataset: {d}")
if d.get('description') is None:
d['description'] = 'Sin descripción'
if d.get('database') is None:
d['database'] = {'database_name': 'Sin organización'}
datasets.append({
'table_name': d.get('table_name', 'Sin nombre'),
'description': d.get('description'),
'database_name': d.get('database').get('database_name'),
'superset_chart_id': d.get('id'),
'private': False, # Ajustar lógica si hay un indicador real de privacidad
})
else:
log.warning(f"Elemento no procesado en raw_datasets: {d}")
superset_datasets = sc.get_datasets()

superset_url = tk.config.get('ckanext.superset.instance.url')
extra_vars = {
'datasets': datasets,
'datasets': superset_datasets,
'superset_url': superset_url,
}
return tk.render('superset/list-datasets.html', extra_vars)
32 changes: 8 additions & 24 deletions ckanext/superset/data/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,12 +55,11 @@ def __init__(

def load_datasets(self, force=False):
""" Get and load all datasets """
log.info("Loading datasets")
if self.datasets and not force:
return

q_data = {"page_size": 50, "page": 0}
log.debug("DENTRO DE load_datasets")
log.debug("Q_DATA:", q_data)
self.datasets = []
while True:
params = {'q': json.dumps(q_data)}
Expand All @@ -73,10 +72,6 @@ def load_datasets(self, force=False):
self.datasets.extend(datasets)

q_data["page"] += 1
print("Q_DATA['page']:", q_data["page"])
if q_data["page"] > 5:
log.error("Too many pages of datasets")
break

return self.datasets

Expand All @@ -101,9 +96,7 @@ def load_charts(self, force=False):
ds.load(chart)
self.charts.append(ds)
q_data["page"] += 1
if q_data["page"] > 20:
log.error("Too many pages of charts")
break

return self.charts

def load_databases(self, force=False):
Expand All @@ -129,21 +122,6 @@ def get_dataset(self, dataset_id):
self.datasets.append(dataset)
return dataset

def get_list_datasets(self, dataset_ids):
    """ Get a list of datasets.

    For each ID, reuse the dataset already cached in ``self.datasets``
    when available; otherwise fetch it from the Superset API and add it
    to the cache. Returns the datasets in the same order as *dataset_ids*.
    """
    collected = []
    for ds_id in dataset_ids:
        # Look for a cached dataset with this ID (checked per-iteration so
        # entries appended below are visible to later duplicate IDs)
        found = None
        for candidate in self.datasets:
            if candidate.get('id') == ds_id:
                found = candidate
                break
        if not found:
            # Not cached: fetch it from the Superset API and cache it
            found = SupersetDataset(superset_instance=self)
            found.get_from_superset(ds_id)
            self.datasets.append(found)
        collected.append(found)
    return collected

def get_chart(self, chart_id):
""" Get a chart by ID """
for chart in self.charts:
Expand All @@ -161,6 +139,12 @@ def get_databases(self):
self.load_databases(self)
return self.databases

def get_datasets(self):
    """ Get the list of Superset datasets.

    Loads the datasets from the Superset API on first use and returns
    the cached list on subsequent calls.
    """
    # BUG FIX: the original called `self.load_datasets(self)`, accidentally
    # passing the instance as the `force` argument. Since an instance is
    # always truthy, that bypassed the `if self.datasets and not force`
    # cache check on every call. Call with no arguments so the cache is
    # honoured; pass force=True explicitly if a reload is ever required.
    # NOTE(review): `get_databases()` has the same accidental-argument
    # pattern (`self.load_databases(self)`) — worth fixing there too.
    self.load_datasets()
    return self.datasets

def prepare_connection(self):
""" Define the client and login if required """
log.info(f"Connecting to {self.superset_url}")
Expand Down

0 comments on commit 0b2db1d

Please sign in to comment.