Reading fix #236

Open · wants to merge 2 commits into base: master
4 changes: 2 additions & 2 deletions indra_db/cli/content.py
@@ -226,7 +226,7 @@ class ContentManager(object):
tr_cols: Tuple = NotImplemented
tc_cols: Tuple = NotImplemented
primary_col: str = NotImplemented
-err_patt = re.compile('.*?constraint "(.*?)".*?Key \((.*?)\)=\((.*?)\).*?',
+err_patt = re.compile(r'.*?constraint "(.*?)".*?Key \((.*?)\)=\((.*?)\).*?',
re.DOTALL)

def __init__(self):
@@ -1513,7 +1513,7 @@ def get_file_data(self):

def get_tarname_from_filename(self, fname):
"Get the name of the tar file based on the file name (or a pmcid)."
-re_match = re.match('(PMC00\d).*?', fname)
+re_match = re.match(r'(PMC00\d).*?', fname)
if re_match is not None:
tarname = re_match.group(0) + 6*'X' + '.xml.tar.gz'
else:
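Note on this file's change: the `r` prefix only silences Python's "invalid escape sequence" warning for sequences like `\(` and `\d`; the compiled patterns are identical. A quick standalone check of the tarname logic with the new raw-string pattern (the file name below is made up):

```python
import re

# Made-up PMC-style file name, just to exercise the raw-string pattern.
fname = 'PMC001234567.nxml'
re_match = re.match(r'(PMC00\d).*?', fname)
if re_match is not None:
    # group(0) is 'PMC001' because the trailing '.*?' matches lazily.
    tarname = re_match.group(0) + 6*'X' + '.xml.tar.gz'
    print(tarname)  # PMC001XXXXXX.xml.tar.gz
```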
2 changes: 1 addition & 1 deletion indra_db/cli/reading.py
@@ -395,7 +395,7 @@ def run_local(task, buffer, num_procs):
readers = ['SPARSER', 'REACH', 'EIDOS', 'TRIPS']
bulk_manager = BulkLocalReadingManager(readers,
buffer_days=buffer,
-n_procs=num_procs)
+n_proc=num_procs)
if task == 'all':
bulk_manager.read_all(db)
elif task == 'new':
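This one is a behavior fix rather than a string-prefix cleanup: the keyword is renamed from `n_procs` to `n_proc`, presumably to match the constructor's signature, since an unexpected keyword argument raises a TypeError. A minimal sketch with a stand-in class (not the real BulkLocalReadingManager):

```python
# Stand-in class, only to show the failure mode the rename avoids.
class FakeReadingManager:
    def __init__(self, readers, buffer_days=1, n_proc=1):
        self.readers = readers
        self.buffer_days = buffer_days
        self.n_proc = n_proc

FakeReadingManager(['REACH'], buffer_days=2, n_proc=4)  # accepted
try:
    FakeReadingManager(['REACH'], buffer_days=2, n_procs=4)
except TypeError as err:
    print(err)  # ... got an unexpected keyword argument 'n_procs'
```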
2 changes: 1 addition & 1 deletion indra_db/reading/read_db.py
@@ -193,7 +193,7 @@ def __init__(self, result, reading_id=None, db_info_id=None,
class DatabaseStatementData(DatabaseResultData):
def __init__(self, *args, **kwargs):
super(DatabaseStatementData, self).__init__(*args, **kwargs)
-self.__text_patt = re.compile('[\W_]+')
+self.__text_patt = re.compile(r'[\W_]+')

@staticmethod
def get_cols():
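As above, `'[\W_]+'` and `r'[\W_]+'` compile to the same regex; the raw string only avoids the escape-sequence warning. For reference, the pattern matches runs of non-alphanumeric characters plus underscores:

```python
import re

# The example text is made up; it only illustrates what the pattern strips.
text_patt = re.compile(r'[\W_]+')
print(text_patt.sub('', 'MEK-1 phosphorylates ERK_2!'))  # MEK1phosphorylatesERK2
```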
2 changes: 1 addition & 1 deletion indra_db/util/aws.py
@@ -3,7 +3,7 @@


def uncamel(word):
-return re.sub(r'([a-z])([A-Z])', '\g<1>_\g<2>', word).lower()
+return re.sub(r'([a-z])([A-Z])', r'\g<1>_\g<2>', word).lower()


def get_role_kwargs(role):
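Here the replacement template also gets the raw-string prefix, since `\g` is likewise an unrecognized escape in a regular string. The output is unchanged; a standalone check:

```python
import re

def uncamel(word):
    # r'\g<1>_\g<2>' inserts an underscore at each lower/upper-case boundary.
    return re.sub(r'([a-z])([A-Z])', r'\g<1>_\g<2>', word).lower()

print(uncamel('ReadingManager'))  # reading_manager
```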
2 changes: 1 addition & 1 deletion indra_db/util/data_gatherer.py
@@ -183,7 +183,7 @@ def digest_s3_files():
bucket = S3_DATA_LOC['bucket']
prefix = S3_DATA_LOC['prefix']

-patt = re.compile(prefix + '([0-9]+)/(\w*?)/?(\w+)_([0-9]+).json')
+patt = re.compile(prefix + r'([0-9]+)/(\w*?)/?(\w+)_([0-9]+).json')

# Get a list of the prefixes for each day.
res = s3.list_objects_v2(Bucket=bucket, Prefix=prefix, Delimiter='/')
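Same kind of change; the four capture groups come out exactly as before. A standalone check with an invented prefix and key (the real S3_DATA_LOC values are not assumed):

```python
import re

# Invented bucket layout, only to show what the four groups capture.
prefix = 'some/data/prefix/'
patt = re.compile(prefix + r'([0-9]+)/(\w*?)/?(\w+)_([0-9]+).json')
key = 'some/data/prefix/20240101/reach/stmts_1704067200.json'
print(patt.match(key).groups())
# ('20240101', 'reach', 'stmts', '1704067200')
```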
2 changes: 1 addition & 1 deletion indra_db/util/s3_path.py
@@ -86,7 +86,7 @@ def from_key_parts(cls, bucket, *key_elements):

@classmethod
def from_string(cls, s3_key_str):
-patt = re.compile('s3://([a-z0-9\-.]+)/(.*)')
+patt = re.compile(r's3://([a-z0-9\-.]+)/(.*)')
m = patt.match(s3_key_str)
if m is None:
raise ValueError("Invalid format for s3 path: %s" % s3_key_str)
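And the same for the S3 URI pattern: the raw-string form still splits a path string into bucket and key. A quick check with an invented path:

```python
import re

# Invented S3 path, only to exercise the pattern.
patt = re.compile(r's3://([a-z0-9\-.]+)/(.*)')
m = patt.match('s3://my-example-bucket/some/prefix/file.json')
print(m.groups())  # ('my-example-bucket', 'some/prefix/file.json')
```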