
Set-up

Mux recons require the cni/ge code from GitHub and a recent version of Octave (>= 3.6.4). To install the most recent Octave on older Ubuntu systems, use this PPA:

sudo apt-add-repository ppa:octave/stable
sudo apt-get update
sudo apt-get install octave

PIL sucks. To fix it, toss it out and install Pillow instead (making sure the image-library dependencies are installed first):

pip uninstall PIL
apt-get install libjpeg-dev libfreetype6-dev zlib1g-dev libpng12-dev
pip install -I Pillow

Manipulating the DB by hand

First, get into a paster shell:

. ~/tg2/bin/activate
paster shell production.ini

If ipython misbehaves inside a virtualenv, launch plain python and embed an IPython shell instead:

python
>>> from IPython import embed
>>> embed()

In the shell, start with a couple of imports:

from nimsgears.model import *
import transaction

If things get weird, try:

transaction.abort()

To rerun a job when you have the job id:

Job.query.filter(Job.id==6059).first().needs_rerun=True
transaction.commit()

To rerun all failed jobs:

for j in Job.query.filter(Job.status=='failed').all(): j.needs_rerun = True
transaction.commit()

To clear all failed jobs:

for j in Job.query.filter(Job.status=='failed').all(): j.status = u'abandoned'
transaction.commit()

Or if you have the epoch id, you can use that:

Job.query.join(Epoch).filter(Epoch.id==6059).first().needs_rerun=True
transaction.commit()

Rerun all jobs from a session that match a description:

sid = Session.query.filter(Session.exam==5001).first().id
epochs = [e.id for e in Session.get(sid).epochs if 'mux' in e.description]
for e in epochs: Job.query.join(Epoch).filter(Epoch.id==e).first().needs_rerun=True
transaction.commit()

If you have a list of exam numbers:

exams = [1,2,3,4]
epochs = [e.id for s in [Session.query.filter(Session.exam==ex).first().epochs for ex in exams] for e in s if e.psd==u'cni_epi']
for e in epochs: Job.query.join(Epoch).filter(Epoch.id==e).first().needs_rerun=True
transaction.commit()

Rerun epochs across all sessions in an experiment (here, mux3 epochs that produced fewer than two datasets):

exp = 183706
epochs=[e.id for e in Epoch.query.join(Session,Epoch.session).join(Subject,Session.subject).join(Experiment,Subject.experiment).filter(Experiment.id==exp).all() if 'mux3' in e.description]
redo = [e for e in epochs if len(Epoch.get(e).datasets)<2]
print(' '.join([str(r) for r in redo]))  # show the epoch ids being redone
for e in redo: Job.query.join(Epoch).filter(Epoch.id==e).first().needs_rerun=True
transaction.commit()

Or, to make the p-file the primary dataset for matching epochs before rerunning them:

epochs=[e.id for e in Epoch.query.join(Session,Epoch.session).join(Subject,Session.subject).join(Experiment,Subject.experiment).filter(Experiment.id==183).filter(Epoch.description.startswith(u'PROBE')).all()]
for eid in epochs:
    e = Epoch.get(eid)
    pfds = [d for d in e.datasets if d.filetype==u'pfile']
    if pfds and pfds[0].kind != u'primary':
        print('fixing epoch ' + str(e))
        e.primary_dataset.kind = u'secondary'
        pfds[0].kind = u'primary'
    transaction.commit()

for eid in epochs: 
    Job.query.join(Epoch).filter(Epoch.id==eid).first().needs_rerun=True
    transaction.commit()

Redo QA for a bunch of exams:

exams = [1676,1734,1794]
epochs = []
for ex in exams: epochs.extend([e.id for e in Epoch.query.join(Session,Epoch.session).filter(Session.exam==ex).filter(Epoch.scan_type==u'functional').all()])
for e in epochs: Epoch.get(e).qa_status = u'rerun'
transaction.commit()

To create a user:

u = User.by_uid(uid=u'newuserID', create=True)
Group.query.filter_by(gid=u'users').first().users.append(u)
transaction.commit()

To create a new research group:

ResearchGroup(gid=u'sunetid_of_pi')
transaction.commit()

To add PI to a research group:

ResearchGroup.get_by(gid=u'sunetid_of_pi').pis = [User.by_uid(uid=u'sunetid_of_pi')]
transaction.commit()

To quickly add new users:

users = ['sunetid1','sunetid2',...]
ResearchGroup.get_by(gid=u'GROUPNAME').members = [User.by_uid(uid=uid.decode(), create=True) for uid in users]
transaction.commit()

To quickly add new research groups:

pis = ['gregz','gold','herfkens','aiagaru','mvasa','iang','shreyasv','biswals','glover','spielman']
for pi in pis:
    try:
        u = User.by_uid(uid=pi.decode(), create=True)
    except Exception:
        continue  # skip this PI if the user can't be looked up or created
    rg = ResearchGroup(gid=pi.decode())
    rg.pis = [u]
    transaction.commit()

Add a new super-user:

Group.get_by(gid=u'superusers').users.append(User.by_uid(uid=u'SUNETID_OF_USER'))
transaction.commit()

Creating jobs by hand (recompute each epoch's digest, then create or restart its find&proc job):

import nimsutil
import os

nims_path = '/net/nimsfs/mnt/nimsfs/nims/'
for eid in epochs:
    dc = Epoch.get(eid)
    new_digest = nimsutil.redigest(os.path.join(nims_path, dc.primary_dataset.relpath))
    if dc.primary_dataset.digest != new_digest:
        dc.primary_dataset.digest = new_digest
        job = Job.query.filter_by(data_container=dc).filter_by(task=u'find&proc').first()
        if not job:
            job = Job(data_container=dc, task=u'find&proc', status=u'pending', activity=u'pending')
            print('Created job for epoch %d' % eid)
        elif job.status != u'pending' and not job.needs_rerun:
            job.needs_rerun = True
            print('Marked job for epoch %d for restart' % eid)
    dc.scheduling = False
    transaction.commit()

Doing something to a bunch of datasets (here, pulling a header field out of every muxarcepi p-file):

import os
from nimsdata import nimsraw
epochs = Epoch.query.filter(Epoch.psd==u'muxarcepi').all()
dacq_ctrl = []
for e in epochs:
    pd = e.primary_dataset
    fn = [f for f in pd.filenames if not f.startswith('_')][0]
    if pd.filetype == nimsraw.NIMSPFile.filetype:
        pf = nimsraw.NIMSPFile(os.path.join('/_nimsfs', pd.relpath, fn))
        dacq_ctrl.append(pf._hdr.rec.dacq_ctrl)
print(dacq_ctrl)

Or, to report the NIfTI slice-timing code for every epi epoch:

import nibabel as nb
epochs = Epoch.query.filter(Epoch.psd.endswith(u'epi')).all()
for e in epochs:
    n = ([None]+[d for d in e.datasets if d.filetype==u'nifti'])[-1]  # last nifti dataset, if any
    if n is not None:
        nf = nb.load(os.path.join('/_nimsfs', n.relpath, n.filenames[0]))
        print((e.session.experiment, e.session.exam, e.series, int(nf.get_header()['slice_code'])))

Get p-file dataset ids from a list of epoch ids, and find same-prescription epochs that could donate a ref scan (see the next section):

epochs = [190571,192493,192496,192688,192690,193264,195958,195852,195854,195856,190726,190730,190578]
dsids = [d.id for e in epochs for d in Epoch.get(e).datasets if d.filetype==u'pfile']
for eid in epochs:
    epoch = Epoch.get(eid)
    good_epochs = [e.id for e in epoch.session.epochs if e.id not in epochs and e.num_bands==epoch.num_bands and e.num_slices==epoch.num_slices and e.phase_encode_undersample==epoch.phase_encode_undersample and (('pe1' in e.description and 'pe1' in epoch.description) or ('pe1' not in e.description and 'pe1' not in epoch.description))]
    print((eid, good_epochs))  # epochs in the same session with a matching prescription

Get epoch ids from a dict mapping exam numbers to series numbers:

esas = {13994:(19, 28, 31), 14229: (34, 13, 25), 14257: (7, 13, 19, 28)}

epoch_ids = []
for ex,se_list in esas.iteritems():
    for se in se_list:
        eps = Epoch.query.join(Session, Epoch.session).filter(Session.exam==ex, Epoch.series==se).all()
        e = eps[-1]
        print((e.name,e.id))
        epoch_ids.append(e.id)

Fixing a p-file tarball that's missing a ref scan (see also fix_refscan.py in nims/scripts). This prints the shell commands to run by hand:

dsg_id = 514215       # dataset with a good ref scan
dsb_ids = [514214,]   # datasets missing their ref scan

dsg = Dataset.get(dsg_id)
good_filepath = os.path.join('/net/cnifs/cnifs/nims/',dsg.relpath,dsg.filenames[0])
print('tar -xzf ' + good_filepath)
good_ref = os.path.splitext(dsg.filenames[0])[0] + '/P*_refscan.7'

for dsbid in dsb_ids:
    dsb = Dataset.get(dsbid)
    bad_filepath = os.path.join('/net/cnifs/cnifs/nims/',dsb.relpath,dsb.filenames[0])
    print('tar -xzf ' + bad_filepath)
    p = os.path.splitext(dsb.filenames[0])[0]
    print('cp ' + good_ref + ' ' + p + '/')
    print('tar -czf ' + p + '.tgz ' + p + '/METADATA.json ' + p + '/DIGEST.txt ' + p +'/P*')
    print('mv ' + p + '.tgz ' + bad_filepath)

Dissociating a subject from a session, so that two sessions that were assigned the same subject (and thus forced to share a subject code) can have unique codes:

sess = Session.query.filter(Session.exam==12604).first()
sess.subject = sess.subject.clone(sess.experiment)
transaction.commit()

And if you want to change the code for that subject:

Session.query.filter(Session.exam==12604).first().subject.code = u'new_code'
transaction.commit()

Get all subjects a group has scanned within the last year:

import datetime
rg = ResearchGroup.query.filter(ResearchGroup.gid==u'cni').first()
year_ago = datetime.date.today() - datetime.timedelta(days=365)
subjects=[(s.firstname.lower(),s.lastname.lower()) for ex in rg.experiments for s in ex.subjects if s.sessions[-1].timestamp>year_ago]

Deleting epochs:

from shutil import rmtree
from os import path
data_path = '/net/nimsfs/mnt/nimsfs/nims/'
epoch_ids = [e.id for e in Epoch.query.join(Session,Epoch.session)
             .join(Subject,Session.subject)
             .join(Experiment,Subject.experiment)
             .filter(Experiment.name==u'sgagnon')
             .filter(Epoch.description==u'3D_Saved_State_AutoSave').all()]

print('The following epochs will be permanently DELETED:')
for eid in epoch_ids:
    e = Epoch.get(eid)
    print('  %s in %s owned by %s' % (e.description, e.session.experiment.name, e.session.experiment.owner.gid))

for eid in epoch_ids:
    e = Epoch.get(eid)
    print('Deleting %s in %s...' % (e.description,e.session.name))
    for did in [d.id for d in e.datasets]:
        d = Dataset.get(did)
        rmtree(path.join(data_path, d.relpath))
        d.delete()
        transaction.commit()
    e = Epoch.get(eid)
    e.delete()
    transaction.commit()

Re-reaping files

To have files re-reaped, the easiest thing to do is:

  • stop the reaper:
    1. ssh into reaper
    2. attach to the screen session (screen -ARD reaper)
    3. find the reaper session, and do ctrl-c.
  • edit nims/nimsproc/.cnimr_pfiles.datetime (for p-files) and/or nims/nimsproc/.cnimr.datetime (for dicoms) and reset the timestamp, as sketched below. Anything later than this time will be re-reaped.
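
For example, a minimal sketch (back the cursor file up first, and match whatever timestamp format the existing file uses; that format isn't documented here):

cd nims/nimsproc
cp .cnimr.datetime .cnimr.datetime.bak   # keep a copy of the current reap cursor
"$EDITOR" .cnimr.datetime                # set it to the earliest time you want re-reaped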

Note that if you move the timestamp too far back, you can lose data: files that were already reaped get discarded on re-reap, and the originals may no longer exist on cnimr (e.g., once the p-file numbers have rolled over).

If you need to re-reap something older, or you want to be more selective, you can copy the new files into place by hand, update the dataset file list, and recompute the directory hash. (TODO: Need notes!)
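
A minimal sketch of that manual fix-up in the paster shell, assuming the replacement files have already been copied into the dataset directory (nimsutil.redigest and nims_path are the same ones used in the job-creation snippet above; the dataset id is hypothetical):

import os
import nimsutil

nims_path = '/net/nimsfs/mnt/nimsfs/nims/'
ds = Dataset.get(514214)                 # hypothetical dataset id
ds_dir = os.path.join(nims_path, ds.relpath)
ds.filenames = os.listdir(ds_dir)        # refresh the stored file list (assumes a writable list column)
ds.digest = nimsutil.redigest(ds_dir)    # recompute the directory hash
transaction.commit()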

Manual recons

New nimsdata calling convention:

./nimsdata/nimsdata.py -p pfile -w nifti --parser_kwarg 'num_jobs=17' /path/to/pfile.tgz /out/base/name
