diff --git a/accounts/dcf_support.py b/accounts/dcf_support.py index 877a891f..63bbc6e0 100755 --- a/accounts/dcf_support.py +++ b/accounts/dcf_support.py @@ -39,6 +39,7 @@ DCF_GOOGLE_SA_VERIFY_URL = settings.DCF_GOOGLE_SA_VERIFY_URL DCF_GOOGLE_SA_MONITOR_URL = settings.DCF_GOOGLE_SA_MONITOR_URL DCF_GOOGLE_SA_URL = settings.DCF_GOOGLE_SA_URL +DCF_URL_URL = settings.DCF_URL_URL class DCFCommFailure(Exception): """Thrown if we have problems communicating with DCF """ @@ -682,6 +683,34 @@ def _write_dataset_summary(dataset_info, dataset_id, phs_map): return is_ok, combo_msg +def get_signed_url_from_dcf(user_id, file_uuid): + """ + :raise TokenFailure: + :raise InternalTokenError: + :raise DCFCommFailure: + :raise RefreshTokenExpired: + """ + # + # Get a signed URL for a file ID. + # + + try: + resp = _dcf_call('{}/{}'.format(DCF_URL_URL, file_uuid), user_id) + except (TokenFailure, InternalTokenError, RefreshTokenExpired, DCFCommFailure) as e: + logger.error("[ERROR] Attempt to contact DCF for signed URL failed (user {})".format(user_id)) + raise e + except Exception as e: + logger.error("[ERROR] Attempt to contact DCF for signed URL failed (user {})".format(user_id)) + raise e + + result = { + 'uri': resp.text, + 'code': resp.status_code + } + + return result + + def verify_sa_at_dcf(user_id, gcp_id, service_account_id, datasets, phs_map, sa_in_use): """ :raise TokenFailure: @@ -712,7 +741,7 @@ def verify_sa_at_dcf(user_id, gcp_id, service_account_id, datasets, phs_map, sa_ try: # DCF requires this to be in the header. OAuth2 library glues this onto the auth header stuff: headers = {'Content-Type': 'application/json'} - + logger.info("[INFO] DCF verification request: {} {}".format(json_dumps(sa_data), service_account_id)) resp = _dcf_call(full_url, user_id, mode=use_mode, post_body=json_dumps(sa_data), headers=headers) except (TokenFailure, InternalTokenError, RefreshTokenExpired, DCFCommFailure) as e: logger.error("[ERROR] Attempt to contact DCF for SA verification failed (user {})".format(user_id)) @@ -1147,7 +1176,7 @@ def refresh_at_dcf(user_id): resp = None # - # Call DCF to drop the linkage. Note that this will immediately remove them from controlled access. + # Call DCF to refresh the linkage. # try: diff --git a/accounts/dcf_views.py b/accounts/dcf_views.py index 9e072d6b..bc5128f2 100755 --- a/accounts/dcf_views.py +++ b/accounts/dcf_views.py @@ -81,7 +81,7 @@ def oauth2_login(request): # Found that 'user' scope had to be included to be able to do the user query on callback, and the data scope # to do data queries. Starting to recognize a pattern here... - oauth = OAuth2Session(client_id, redirect_uri=full_callback, scope=['openid', 'user', 'google_service_account', 'google_link']) + oauth = OAuth2Session(client_id, redirect_uri=full_callback, scope=['openid', 'user', 'data', 'google_service_account', 'google_link']) authorization_url, state = oauth.authorization_url(DCF_AUTH_URL) # stash the state string in the session! 
diff --git a/accounts/utils.py b/accounts/utils.py index af5e36a0..df3ee485 100644 --- a/accounts/utils.py +++ b/accounts/utils.py @@ -296,3 +296,24 @@ def unreg_gcp(user, gcp_id): status=500 return response, status + + +def get_user_gcps(user, gcp_id=None): + gcps = [] + gcp_list = None + + try: + if gcp_id: + gcp_list = GoogleProject.objects.filter(user=user, active=1, project_id=gcp_id) + else: + gcp_list = GoogleProject.objects.filter(user=user, active=1) + + for gcp in gcp_list: + gcps.append({'gcp_id': gcp.project_id, 'gcp_name': gcp.project_name, 'users': [x.email for x in gcp.users_set.all()]}) + + except Exception as e: + logger.error("[ERROR] While fetching the GCP project list for user {}:".format(user.id)) + logger.exception(e) + + return gcps + diff --git a/cohorts/metadata_helpers.py b/cohorts/metadata_helpers.py index 35f94c5f..62443f5e 100644 --- a/cohorts/metadata_helpers.py +++ b/cohorts/metadata_helpers.py @@ -1385,28 +1385,26 @@ def get_full_sample_metadata(barcodes): result = { 'total_found': 0 } - db = None - cursor = None - - barcodes_by_program = {} - for barcode in barcodes: - dash = barcode.find("-") - if dash >= 0: - prog = barcode[0:dash] - if prog not in ['TCGA', 'TARGET']: + try: + barcodes_by_program = {} + + for barcode in barcodes: + dash = barcode.find("-") + if dash >= 0: + prog = barcode[0:dash] + if prog not in ['TCGA', 'TARGET']: + prog = 'CCLE' + else: prog = 'CCLE' - else: - prog = 'CCLE' - if prog not in barcodes_by_program: - barcodes_by_program[prog] = () - barcodes_by_program[prog] += (barcode,) + if prog not in barcodes_by_program: + barcodes_by_program[prog] = [] + barcodes_by_program[prog].append(barcode) - programs = Program.objects.filter(name__in=list(barcodes_by_program.keys()), active=True, is_public=True) + programs = Program.objects.filter(name__in=list(barcodes_by_program.keys()), active=True, is_public=True) - items = {} + items = {} - try: db = get_sql_connection() cursor = db.cursor() @@ -1414,54 +1412,76 @@ def get_full_sample_metadata(barcodes): program_tables = program.get_metadata_tables() program_data_tables = program.get_data_tables() - cursor.execute(""" - SELECT biospec.sample_barcode as sb, biospec.case_barcode as cb, biospec.* - FROM {} biospec - WHERE biospec.sample_barcode IN ({}) AND biospec.endpoint_type = 'current' - """.format(program_tables.biospec_table, ",".join(["%s"] * (len(barcodes_by_program[program.name])))), - barcodes_by_program[program.name]) - - fields = cursor.description - skip = ['endpoint_type', 'metadata_clinical_id', 'metadata_biospecimen_id', 'sb', 'cb'] + search_clause = BigQuerySupport.build_bq_filter_and_params({'sample_barcode': barcodes_by_program[program.name]}) - for row in cursor.fetchall(): - items[row[0]] = { - 'sample_barcode': row[0], - 'case_barcode': row[1], - 'biospecimen_data': {fields[index][0]: column for index, column in enumerate(row) if - fields[index][0] not in skip}, - 'data_details': {} + sample_job = BigQuerySupport.insert_query_job(""" + SELECT biospec.sample_barcode as sb, biospec.case_barcode as cb, biospec.* + FROM `{}` biospec + WHERE {} + """.format( + "{}.{}.{}".format(settings.BIGQUERY_DATA_PROJECT_ID, program_tables.bq_dataset, program_tables.biospec_bq_table,), + search_clause['filter_string'] + ), search_clause['parameters']) + + bq_results = BigQuerySupport.wait_for_done_and_get_results(sample_job) + result_schema = BigQuerySupport.get_result_schema(sample_job['jobReference']) + + skip = ['endpoint_type', 'metadata_clinical_id', 'metadata_biospecimen_id', 'sb', 'cb', 
'case_barcode'] + + for row in bq_results: + items[row['f'][0]['v']] = { + 'sample_barcode': row['f'][0]['v'], + 'case_barcode': row['f'][1]['v'], + 'data_details': { + x.build: [] for x in program_data_tables + }, + 'biospecimen_data': {result_schema['fields'][index]['name']: x['v'] for index, x in enumerate(row['f'], start=0) if result_schema['fields'][index]['name'] not in skip} } - for build in program_data_tables: - cursor.execute(""" - SELECT md.sample_barcode as sb, md.* - FROM {} md - WHERE md.sample_barcode IN ({}) AND NOT(md.sample_barcode = '') AND md.sample_barcode IS NOT NULL - """.format(build.data_table, ",".join(["%s"] * (len(barcodes_by_program[program.name])))), - barcodes_by_program[program.name]) + if len(list(items.keys())): + queries = [] + + for build_table in program_data_tables: + logger.info(str(build_table)) + queries.append({ + 'query': """ + #standardSQL + SELECT md.sample_barcode as sb, md.* + FROM `{}` md + WHERE {} AND NOT(md.sample_barcode = '') AND md.sample_barcode IS NOT NULL + """.format( + "{}.{}.{}".format( + settings.BIGQUERY_DATA_PROJECT_ID, build_table.bq_dataset, + build_table.data_table.lower()), + search_clause['filter_string']), + 'parameters': search_clause['parameters'], + 'build': build_table.build + }) + + results = BigQuerySupport.insert_job_batch_and_get_results(queries) + + for bq_result in results: + result_schema = bq_result['result_schema'] + bq_results = bq_result['bq_results'] + if not bq_results or not result_schema: + logger.warn("[WARNING] Results not received for this query:") + logger.warn("{}".format(bq_result['query'])) + continue + for row in bq_results: + items[row['f'][0]['v']]['data_details'][bq_result['build']].append({ + result_schema['fields'][index]['name']: x['v'] for index, x in enumerate(row['f'], start=0) if result_schema['fields'][index]['name'] not in skip + }) - fields = cursor.description - for row in cursor.fetchall(): - if not build.build in items[row[0]]['data_details']: - items[row[0]]['data_details'][build.build] = [] - items[row[0]]['data_details'][build.build].append( - {fields[index][0]: column for index, column in enumerate(row) if fields[index][0] not in skip} - ) + # TODO: Once we have aliquots in the database again, add those here - # TODO: Once we have aliquots in the database again, add those here + result['total_found'] += 1 + result['samples'] = [item for item in list(items.values())] - result['total_found'] += 1 - result['samples'] = [item for item in list(items.values())] + return result except Exception as e: logger.error("[ERROR] While fetching sample metadata for {}:".format(barcode)) logger.exception(e) - finally: - if cursor: cursor.close() - if db and db.open: db.close() - - return result def get_full_case_metadata(barcodes): @@ -1469,90 +1489,113 @@ def get_full_case_metadata(barcodes): result = { 'total_found': 0 } - db = None - cursor = None - - barcodes_by_program = {} - for barcode in barcodes: - dash = barcode.find("-") - if dash >= 0: - prog = barcode[0:dash] - if prog not in ['TCGA','TARGET']: + try: + barcodes_by_program = {} + + for barcode in barcodes: + dash = barcode.find("-") + if dash >= 0: + prog = barcode[0:dash] + if prog not in ['TCGA', 'TARGET']: + prog = 'CCLE' + else: prog = 'CCLE' - else: - prog = 'CCLE' - if prog not in barcodes_by_program: - barcodes_by_program[prog] = () - barcodes_by_program[prog] += (barcode,) + if prog not in barcodes_by_program: + barcodes_by_program[prog] = [] + barcodes_by_program[prog].append(barcode) - programs = 
Program.objects.filter(name__in=list(barcodes_by_program.keys()),active=True,is_public=True) + programs = Program.objects.filter(name__in=list(barcodes_by_program.keys()), active=True, is_public=True) - items = {} - - try: - db = get_sql_connection() - cursor = db.cursor() + items = {} for program in programs: program_tables = program.get_metadata_tables() program_data_tables = program.get_data_tables() + + bq_search = BigQuerySupport.build_bq_filter_and_params({'case_barcode': barcodes_by_program[program.name]}) - cursor.execute(""" + case_job = BigQuerySupport.insert_query_job(""" + #standardSQL SELECT clin.case_barcode as cb, clin.* - FROM {} clin - WHERE clin.case_barcode IN ({}) AND clin.endpoint_type = 'current' - """.format(program_tables.clin_table, ",".join(["%s"]*(len(barcodes_by_program[program.name])))), barcodes_by_program[program.name]) + FROM `{}` clin + WHERE {} + """.format("{}.{}.{}".format( + settings.BIGQUERY_DATA_PROJECT_ID, program_tables.bq_dataset, program_tables.clin_bq_table), + bq_search['filter_string']), bq_search['parameters']) - fields = cursor.description - skip = ['endpoint_type', 'metadata_clinical_id', 'metadata_biospecimen_id', 'cb'] + bq_results = BigQuerySupport.wait_for_done_and_get_results(case_job) + result_schema = BigQuerySupport.get_result_schema(case_job['jobReference']) - for row in cursor.fetchall(): - items[row[0]] = { - 'case_barcode': row[0], - 'clinical_data': {fields[index][0]: column for index, column in enumerate(row) if fields[index][0] not in skip}, + skip = ['endpoint_type', 'metadata_clinical_id', 'metadata_biospecimen_id', 'cb', 'summary_file_count'] + + for row in bq_results: + items[row['f'][0]['v']] = { + 'case_barcode': row['f'][0]['v'], 'samples': [], - 'data_details': {} + 'data_details': { + x.build: [] for x in program_data_tables + }, + 'clinical_data': {result_schema['fields'][index]['name']: x['v'] for index, x in enumerate(row['f'], start=0) if result_schema['fields'][index]['name'] not in skip} } - cursor.execute(""" - SELECT case_barcode, sample_barcode - FROM {} - WHERE case_barcode IN ({}) AND endpoint_type = 'current' - """.format(program_tables.biospec_table, ",".join(["%s"] * (len(barcodes_by_program[program.name])))), barcodes_by_program[program.name]) - - for row in cursor.fetchall(): - items[row[0]]['samples'].append(row[1]) - - for build in program_data_tables: - cursor.execute(""" - SELECT md.case_barcode as cb, md.* - FROM {} md - WHERE md.case_barcode IN ({}) AND (md.sample_barcode = '' OR md.sample_barcode IS NULL) - """.format(build.data_table, ",".join(["%s"] * (len(barcodes_by_program[program.name])))), - barcodes_by_program[program.name]) + if len(list(items.keys())): + queries = [] + + for build_table in program_data_tables: + logger.info(str(build_table)) + queries.append({ + 'query': """ + #standardSQL + SELECT md.case_barcode as cb, md.* + FROM `{}` md + WHERE {} AND (md.sample_barcode = '' OR md.sample_barcode IS NULL) + """.format( + "{}.{}.{}".format( + settings.BIGQUERY_DATA_PROJECT_ID, build_table.bq_dataset, build_table.data_table.lower()), + bq_search['filter_string']), + 'parameters': bq_search['parameters'], + 'query_type': 'data_details', + 'build': build_table.build + }) + + queries.append({ + 'query': """ + #standardSQL + SELECT case_barcode, sample_barcode + FROM `{}` + WHERE {} + """.format("{}.{}.{}".format( + settings.BIGQUERY_DATA_PROJECT_ID, program_tables.bq_dataset, program_tables.biospec_bq_table, + ), bq_search['filter_string']), + 'parameters': bq_search['parameters'], + 
'query_type': 'samples' + }) + + results = BigQuerySupport.insert_job_batch_and_get_results(queries) + + for bq_result in results: + result_schema = bq_result['result_schema'] + bq_results = bq_result['bq_results'] + if bq_result['query_type'] == 'samples': + for row in bq_results: + items[row['f'][0]['v']]['samples'].append(row['f'][1]['v']) + else: + for row in bq_results: + items[row['f'][0]['v']]['data_details'][bq_result['build']].append({ + result_schema['fields'][index]['name']: x['v'] for index, x in enumerate(row['f'], start=0) if result_schema['fields'][index]['name'] not in skip + }) - fields = cursor.description - for row in cursor.fetchall(): - if not build.build in items[row[0]]['data_details']: - items[row[0]]['data_details'][build.build] = [] - items[row[0]]['data_details'][build.build].append( - {fields[index][0]: column for index, column in enumerate(row) if fields[index][0] not in skip} - ) + # TODO: Once we have aliquots in the database again, add those here - # TODO: Once we have aliquots in the database again, add those here + result['total_found'] += 1 + result['cases'] = [item for item in list(items.values())] - result['total_found'] += 1 - result['cases'] = [item for item in list(items.values())] + return result except Exception as e: logger.error("[ERROR] While fetching sample metadata for {}:".format(barcode)) logger.exception(e) - finally: - if cursor: cursor.close() - if db and db.open: db.close() - - return result def get_sample_metadata(barcode): @@ -1696,7 +1739,15 @@ def get_sample_case_list_bq(cohort_id=None, inc_filters=None, comb_mut_filters=' if key_field_type not in field_types: invalid_keys.append(key_split) else: - filters[field_types[key_field_type]['type']][key_field] = inc_filters[prog][key_split] + # Check to make sure any string values aren't empty strings - if they are, it's invalid. + vals = inc_filters[prog][key_split] + if not isinstance(vals, list): + vals = [inc_filters[prog][key_split]] + for val in vals: + if isinstance(val, str) and not len(val): + invalid_keys.append(key_split) + else: + filters[field_types[key_field_type]['type']][key_field] = inc_filters[prog][key_split] if len(invalid_keys) > 0: raise Exception("Improper filter(s) supplied for program {}: '{}'".format(prog, ("', '".join(invalid_keys)))) diff --git a/cohorts/utils.py b/cohorts/utils.py index 905ad1c7..1b593948 100644 --- a/cohorts/utils.py +++ b/cohorts/utils.py @@ -31,9 +31,9 @@ from django.conf import settings -def create_cohort(user, filters=None, name=None, desc=None, source_id=None): +def create_cohort(user, filters=None, name=None, description=None, source_id=None): - if not filters and not name and not desc: + if not filters and not name and not description: # Can't save/edit a cohort when nothing is being changed! 
return None @@ -45,8 +45,12 @@ def create_cohort(user, filters=None, name=None, desc=None, source_id=None): source_progs = source.get_programs() if source and not filters or (len(filters) <= 0): - # If we're only changing the name and/or desc, just edit the cohort and update it - source.update(name=name, description=desc) + # If we're only changing the name and/or desc, just edit the cohort and return + if name: + source.name = name + if description: + source.description = description + source.save() return { 'cohort_id': source.id } # Make and save cohort @@ -75,7 +79,7 @@ def create_cohort(user, filters=None, name=None, desc=None, source_id=None): } # Create new cohort - cohort = Cohort.objects.create(name=name, description=desc) + cohort = Cohort.objects.create(name=name, description=description) cohort.save() # Set permission for user to be owner diff --git a/google_helpers/bigquery/bq_support.py b/google_helpers/bigquery/bq_support.py index 3534f125..51826263 100644 --- a/google_helpers/bigquery/bq_support.py +++ b/google_helpers/bigquery/bq_support.py @@ -302,16 +302,7 @@ def execute_query(self, query, parameters=None, write_disposition='WRITE_EMPTY', 'total_bytes_processed': query_job['statistics']['query']['totalBytesProcessed'] } - job_is_done = self.bq_service.jobs().get(projectId=self.executing_project, - jobId=job_id).execute(num_retries=5) - - retries = 0 - - while (job_is_done and not job_is_done['status']['state'] == 'DONE') and retries < BQ_ATTEMPT_MAX: - retries += 1 - sleep(1) - job_is_done = self.bq_service.jobs().get(projectId=self.executing_project, - jobId=job_id).execute(num_retries=5) + job_is_done = self.await_job_is_done(query_job) # Parse the final disposition if job_is_done and job_is_done['status']['state'] == 'DONE': @@ -333,6 +324,20 @@ def execute_query(self, query, parameters=None, write_disposition='WRITE_EMPTY', return query_results + # Check for a job's status for the maximum number of attempts, return the final resulting response + def await_job_is_done(self, query_job): + done = self.job_is_done(query_job) + retries = 0 + + while not done and retries < BQ_ATTEMPT_MAX: + retries += 1 + sleep(1) + done = self.job_is_done(query_job) + + return self.bq_service.jobs().get( + projectId=self.executing_project, jobId=query_job['jobReference']['jobId'] + ).execute(num_retries=5) + # Check to see if query job is done def job_is_done(self, query_job): job_is_done = self.bq_service.jobs().get(projectId=self.executing_project, @@ -342,6 +347,7 @@ def job_is_done(self, query_job): # Fetch the results of a job based on the reference provided def fetch_job_results(self, job_ref): + logger.info(str(job_ref)) result = [] page_token = None @@ -394,9 +400,9 @@ def insert_query_job(cls, query, parameters=None): # Check the status of a BQ job @classmethod - def check_job_is_done(cls, job_ref): + def check_job_is_done(cls, query_job): bqs = cls(None, None, None) - return bqs.job_is_done(job_ref) + return bqs.job_is_done(query_job) # Do a 'dry run' query, which estimates the cost @classmethod @@ -404,12 +410,20 @@ def estimate_query_cost(cls, query, parameters=None): bqs = cls(None, None, None) return bqs.execute_query(query, parameters, cost_est=True) - # Given a BQ service and a job reference, fetch out the results + # Given a job reference, fetch out the results @classmethod def get_job_results(cls, job_reference): bqs = cls(None, None, None) return bqs.fetch_job_results(job_reference) + # Given a job reference for a running job, await the completion, + # then fetch and 
return the results + @classmethod + def wait_for_done_and_get_results(cls, query_job): + bqs = cls(None, None, None) + check_done = bqs.await_job_is_done(query_job) + return bqs.fetch_job_results(check_done['jobReference']) + # Given a BQ service and a job reference, fetch out the results @classmethod def get_job_resource(cls, job_id, project_id): @@ -430,6 +444,51 @@ def get_table_schema(cls, projectId, datasetId, tableId): return [{'name': x['name'], 'type': x['type']} for x in table['schema']['fields']] + @classmethod + def get_result_schema(cls, job_ref): + bqs = cls(None, None, None) + results = bqs.bq_service.jobs().getQueryResults(**job_ref).execute(num_retries=5) + + return results['schema'] + + # Method for submitting a group of jobs and awaiting the results of the whole set + @classmethod + def insert_job_batch_and_get_results(cls, query_set): + logger.info(str(query_set)) + bqs = cls(None, None, None) + submitted_job_set = {} + for query in query_set: + job_obj = bqs.insert_bq_query_job(query['query'],query['parameters']) + query['job_id'] = job_obj['jobReference']['jobId'] + submitted_job_set[job_obj['jobReference']['jobId']] = job_obj + + not_done = True + still_checking = True + num_retries = 0 + + while still_checking and not_done: + not_done = False + for job in submitted_job_set: + if not BigQuerySupport.check_job_is_done(submitted_job_set[job]): + not_done = True + if not_done: + sleep(1) + num_retries += 1 + still_checking = (num_retries < settings.BQ_MAX_ATTEMPTS) + + if not_done: + logger.warn("[WARNING] Not all of the queries completed!") + + for query in query_set: + if bqs.job_is_done(submitted_job_set[query['job_id']]): + query['bq_results'] = bqs.fetch_job_results(submitted_job_set[query['job_id']]['jobReference']) + query['result_schema'] = BigQuerySupport.get_result_schema(submitted_job_set[query['job_id']]['jobReference']) + else: + query['bq_results'] = None + query['result_schema'] = None + + return query_set + # Builds a BQ API v2 QueryParameter set and WHERE clause string from a set of filters of the form: # { # 'field_name': [,...] diff --git a/google_helpers/compute_service.py b/google_helpers/compute_service.py new file mode 100644 index 00000000..85ddfe22 --- /dev/null +++ b/google_helpers/compute_service.py @@ -0,0 +1,42 @@ +""" + +Copyright 2019, Institute for Systems Biology + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +""" + +from oauth2client.client import GoogleCredentials +from django.conf import settings +import httplib2 +# from .utils import build_with_retries + +from googleapiclient.discovery import build + +COMPUTE_SCOPES = ['https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform'] + + +# def get_crm_resource(): +# """ +# Returns: a Cloud Resource Manager service client for calling the API. 
+# """ +# credentials = GoogleCredentials.get_application_default() +# service = build_with_retries('cloudresourcemanager', 'v1beta1', credentials, 2) +# return service + +def get_compute_resource(): + credentials = GoogleCredentials.from_stream(settings.GOOGLE_APPLICATION_CREDENTIALS).create_scoped(COMPUTE_SCOPES) + http = credentials.authorize(httplib2.Http()) + service = build('compute', 'v1', http=http, cache_discovery=False) + return service diff --git a/projects/models.py b/projects/models.py index cfac5081..3e7e496f 100644 --- a/projects/models.py +++ b/projects/models.py @@ -231,7 +231,8 @@ class Meta(object): verbose_name_plural = "Public Data Tables" def __str__(self): - return self.program__name + " " + self.build + " Data Tables" + return "{} [{}] Data Tables".format(self.program.name,self.build) + class Public_Annotation_Tables(models.Model): program = models.ForeignKey(Program, null=False)
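
Usage sketch (not part of the change set): one way the new get_signed_url_from_dcf() helper might be consumed. The caller, the logger wiring, and the import path for the token exceptions are assumptions; the {'uri', 'code'} return shape and the exception types come from the function added to accounts/dcf_support.py above.

import logging

from accounts.dcf_support import get_signed_url_from_dcf, DCFCommFailure
from accounts.sa_utils import TokenFailure, InternalTokenError, RefreshTokenExpired  # import path assumed

logger = logging.getLogger(__name__)

def signed_url_for_file(user, file_uuid):
    try:
        result = get_signed_url_from_dcf(user.id, file_uuid)
    except (TokenFailure, InternalTokenError, RefreshTokenExpired, DCFCommFailure) as e:
        logger.error("[ERROR] Could not get a signed URL from DCF for file {}".format(file_uuid))
        logger.exception(e)
        return None
    if result['code'] != 200:
        logger.warn("[WARNING] DCF returned status {} for file {}".format(result['code'], file_uuid))
        return None
    # 'uri' holds the body of the DCF response, per get_signed_url_from_dcf
    return result['uri']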
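
Usage sketch (not part of the change set): the metadata_helpers.py rewrite above repeatedly uses the same BigQuerySupport flow -- build a parameterized filter, insert a query job, wait for completion, then zip each row against the result schema. A condensed version of that flow, with the table name and barcode list as placeholders and the module path assumed from the file layout:

from google_helpers.bigquery.bq_support import BigQuerySupport  # module path assumed

def fetch_rows_as_dicts(table_fqn, barcodes):
    # Build a BQ WHERE clause plus query parameters from a simple filter set
    search = BigQuerySupport.build_bq_filter_and_params({'sample_barcode': barcodes})

    # Submit the job without blocking...
    job = BigQuerySupport.insert_query_job("""
        #standardSQL
        SELECT *
        FROM `{}`
        WHERE {}
    """.format(table_fqn, search['filter_string']), search['parameters'])

    # ...then wait for completion and pull the rows and the result schema
    rows = BigQuerySupport.wait_for_done_and_get_results(job)
    schema = BigQuerySupport.get_result_schema(job['jobReference'])

    # BQ rows arrive as {'f': [{'v': <value>}, ...]}; pair each cell with its schema field name
    return [
        {schema['fields'][i]['name']: cell['v'] for i, cell in enumerate(row['f'])}
        for row in (rows or [])
    ]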
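
Usage sketch (not part of the change set): the new BigQuerySupport.insert_job_batch_and_get_results() takes a list of {'query', 'parameters', ...} entries, echoes back any extra keys attached to each entry (the diff uses 'build' and 'query_type'), and fills in 'bq_results' and 'result_schema' -- or None if a job never finished within BQ_MAX_ATTEMPTS. A hedged sketch with placeholder tables and row handling:

from google_helpers.bigquery.bq_support import BigQuerySupport  # module path assumed

def run_batch(tables, barcodes):
    # tables is a placeholder list of (fully-qualified table name, build) pairs
    search = BigQuerySupport.build_bq_filter_and_params({'case_barcode': barcodes})

    queries = [{
        'query': """
            #standardSQL
            SELECT md.case_barcode as cb, md.*
            FROM `{}` md
            WHERE {}
        """.format(table_fqn, search['filter_string']),
        'parameters': search['parameters'],
        'build': build  # pass-through key, echoed back on each result entry
    } for table_fqn, build in tables]

    rows_by_build = {build: [] for _, build in tables}

    for entry in BigQuerySupport.insert_job_batch_and_get_results(queries):
        if not entry['bq_results'] or not entry['result_schema']:
            # Job didn't finish or returned nothing -- mirrors the warning path in the diff
            continue
        schema = entry['result_schema']
        for row in entry['bq_results']:
            rows_by_build[entry['build']].append(
                {schema['fields'][i]['name']: cell['v'] for i, cell in enumerate(row['f'])}
            )

    return rows_by_build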