This repository has been archived by the owner on May 17, 2024. It is now read-only.

Commit

Merge pull request #803 from datafold/redshift_svv_columns
redshift: also try to get schema from svv_columns
dlawin authored Dec 16, 2023
2 parents 6738ca7 + d45c332 commit 71a1b3d
Showing 1 changed file with 36 additions and 1 deletion.
data_diff/databases/redshift.py: 36 additions & 1 deletion
@@ -122,6 +122,38 @@ def query_pg_get_cols(self, path: DbPath) -> Dict[str, tuple]:
 
         return schema_dict
 
+    def select_svv_columns_schema(self, path: DbPath) -> Dict[str, tuple]:
+        database, schema, table = self._normalize_table_path(path)
+
+        db_clause = ""
+        if database:
+            db_clause = f" AND table_catalog = '{database.lower()}'"
+
+        return (
+            f"""
+            select
+                distinct
+                column_name,
+                data_type,
+                datetime_precision,
+                numeric_precision,
+                numeric_scale
+            from
+                svv_columns
+            where table_name = '{table.lower()}' and table_schema = '{schema.lower()}'
+            """
+            + db_clause
+        )
+
+    def query_svv_columns(self, path: DbPath) -> Dict[str, tuple]:
+        rows = self.query(self.select_svv_columns_schema(path), list)
+        if not rows:
+            raise RuntimeError(f"{self.name}: Table '{'.'.join(path)}' does not exist, or has no columns")
+
+        d = {r[0]: r for r in rows}
+        assert len(d) == len(rows)
+        return d
+
     # when using a non-information_schema source, strip (N) from type(N) etc. to match
     # typical information_schema output
     def _normalize_schema_info(self, rows) -> Dict[str, tuple]:
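
For orientation, a minimal sketch of how the two new helpers above would be used, assuming an already-connected data_diff Redshift instance named db and a made-up table; every name and value below is hypothetical:

# Hypothetical usage of the helpers added above; db and the path are assumptions.
path = ("public", "orders")  # (schema, table); including a database name adds a table_catalog filter

sql = db.select_svv_columns_schema(path)
# sql selects column_name, data_type, datetime_precision, numeric_precision
# and numeric_scale from svv_columns for table_name='orders' and
# table_schema='public' (identifiers are lower-cased by the method).

schema = db.query_svv_columns(path)
# A dict keyed by column name, one row tuple per column, e.g.
# {"order_id": ("order_id", "integer", None, 32, 0), ...}  (values made up)
# Raises RuntimeError if svv_columns returns no rows for the table.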
@@ -150,7 +182,10 @@ def query_table_schema(self, path: DbPath) -> Dict[str, tuple]:
         try:
             return self.query_external_table_schema(path)
         except RuntimeError:
-            return self.query_pg_get_cols(path)
+            try:
+                return self.query_pg_get_cols(path)
+            except Exception:
+                return self.query_svv_columns(path)

     def _normalize_table_path(self, path: DbPath) -> DbPath:
         if len(path) == 1:
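
Taken together, query_table_schema now falls back through three sources: the external-table lookup first, then pg_get_cols, then svv_columns. A minimal sketch of exercising that chain, again with a hypothetical connection object db:

# Hypothetical: db is an already-connected data_diff Redshift instance.
path = ("public", "orders")
try:
    schema = db.query_table_schema(path)
except RuntimeError as exc:
    # Reached only when none of the three sources produced a schema.
    print(f"schema lookup failed: {exc}")
else:
    print(sorted(schema))  # column names discovered for the table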
