diff --git a/manage.py b/manage.py
index 6969298..774fc57 100644
--- a/manage.py
+++ b/manage.py
@@ -5,7 +5,7 @@
def main():
- os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'preflib.settings')
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "preflib.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
@@ -17,5 +17,5 @@ def main():
execute_from_command_line(sys.argv)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/preflib/settings.py b/preflib/settings.py
index d3901a0..9d07f89 100755
--- a/preflib/settings.py
+++ b/preflib/settings.py
@@ -23,52 +23,51 @@
# Application definition
INSTALLED_APPS = [
- 'preflibapp.apps.PreflibappConfig',
- 'django.contrib.admin',
- 'django.contrib.auth',
- 'django.contrib.contenttypes',
- 'django.contrib.sessions',
- 'django.contrib.messages',
- 'django.contrib.staticfiles',
- 'django_distill',
+ "preflibapp.apps.PreflibappConfig",
+ "django.contrib.admin",
+ "django.contrib.auth",
+ "django.contrib.contenttypes",
+ "django.contrib.sessions",
+ "django.contrib.messages",
+ "django.contrib.staticfiles",
+ "django_distill",
]
MIDDLEWARE = [
- 'django.middleware.security.SecurityMiddleware',
- 'django.contrib.sessions.middleware.SessionMiddleware',
- 'django.middleware.common.CommonMiddleware',
- 'django.middleware.csrf.CsrfViewMiddleware',
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'django.contrib.messages.middleware.MessageMiddleware',
- 'django.middleware.clickjacking.XFrameOptionsMiddleware',
+ "django.middleware.security.SecurityMiddleware",
+ "django.contrib.sessions.middleware.SessionMiddleware",
+ "django.middleware.common.CommonMiddleware",
+ "django.middleware.csrf.CsrfViewMiddleware",
+ "django.contrib.auth.middleware.AuthenticationMiddleware",
+ "django.contrib.messages.middleware.MessageMiddleware",
+ "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
-ROOT_URLCONF = 'preflib.urls'
+ROOT_URLCONF = "preflib.urls"
TEMPLATES = [
{
- 'BACKEND': 'django.template.backends.django.DjangoTemplates',
- 'DIRS': [os.path.join(BASE_DIR, 'templates')],
- 'APP_DIRS': True,
- 'OPTIONS': {
- 'context_processors': [
- 'django.template.context_processors.debug',
- 'django.template.context_processors.request',
- 'django.contrib.auth.context_processors.auth',
- 'django.contrib.messages.context_processors.messages',
+ "BACKEND": "django.template.backends.django.DjangoTemplates",
+ "DIRS": [os.path.join(BASE_DIR, "templates")],
+ "APP_DIRS": True,
+ "OPTIONS": {
+ "context_processors": [
+ "django.template.context_processors.debug",
+ "django.template.context_processors.request",
+ "django.contrib.auth.context_processors.auth",
+ "django.contrib.messages.context_processors.messages",
],
},
},
]
-WSGI_APPLICATION = 'preflib.wsgi.application'
+WSGI_APPLICATION = "preflib.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
-DATABASES = {
-}
+DATABASES = {}
# Password validation
@@ -76,16 +75,16 @@
AUTH_PASSWORD_VALIDATORS = [
{
- 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
+ "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
- 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
+ "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
- 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
+ "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
- 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
+ "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
@@ -93,9 +92,9 @@
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
-LANGUAGE_CODE = 'en-us'
+LANGUAGE_CODE = "en-us"
-TIME_ZONE = 'UTC'
+TIME_ZONE = "UTC"
USE_I18N = True
@@ -104,12 +103,11 @@
USE_TZ = True
+STATIC_URL = "/static/"
-STATIC_URL = '/static/'
-
-LOGIN_URL = '/login'
+LOGIN_URL = "/login"
# Auto primary keys
-DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
+DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
from preflib.local_settings import *
diff --git a/preflib/urls.py b/preflib/urls.py
index 130825b..5fce1a3 100644
--- a/preflib/urls.py
+++ b/preflib/urls.py
@@ -13,16 +13,17 @@
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
+
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import handler400, handler403, handler404, handler500
-handler400 = 'preflibapp.views.error_400_view'
-handler403 = 'preflibapp.views.error_403_view'
-handler404 = 'preflibapp.views.error_404_view'
-handler500 = 'preflibapp.views.error_500_view'
+handler400 = "preflibapp.views.error_400_view"
+handler403 = "preflibapp.views.error_403_view"
+handler404 = "preflibapp.views.error_404_view"
+handler500 = "preflibapp.views.error_500_view"
urlpatterns = [
- path('djangoadmin/', admin.site.urls),
- path('', include('preflibapp.urls'))
+ path("djangoadmin/", admin.site.urls),
+ path("", include("preflibapp.urls")),
]
diff --git a/preflib/wsgi.py b/preflib/wsgi.py
index 3ba442d..635f0cc 100644
--- a/preflib/wsgi.py
+++ b/preflib/wsgi.py
@@ -11,6 +11,6 @@
from django.core.wsgi import get_wsgi_application
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'preflib.settings')
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "preflib.settings")
application = get_wsgi_application()
diff --git a/preflibapp/admin.py b/preflibapp/admin.py
index 5996cee..e300657 100644
--- a/preflibapp/admin.py
+++ b/preflibapp/admin.py
@@ -8,4 +8,4 @@
admin.site.register(Metadata)
admin.site.register(DataProperty)
admin.site.register(Paper)
-admin.site.register(Log)
\ No newline at end of file
+admin.site.register(Log)
diff --git a/preflibapp/apps.py b/preflibapp/apps.py
index 1dd4476..693b4a5 100644
--- a/preflibapp/apps.py
+++ b/preflibapp/apps.py
@@ -2,4 +2,4 @@
class PreflibappConfig(AppConfig):
- name = 'preflibapp'
+ name = "preflibapp"
diff --git a/preflibapp/choices.py b/preflibapp/choices.py
index 35a1b68..6931c0e 100644
--- a/preflibapp/choices.py
+++ b/preflibapp/choices.py
@@ -1,5 +1,5 @@
def find_choice_value(choices, key):
- for (k, v) in choices:
+ for k, v in choices:
if k == key:
return v
return None
@@ -13,35 +13,32 @@ def is_choice(choices, choice):
DATATYPES = [
- ('soc', 'strict order complete'),
- ('soi', 'strict order incomplete'),
- ('toc', 'tie order complete'),
- ('toi', 'tie order incomplete'),
- ('cat', 'categorical'),
+ ("soc", "strict order complete"),
+ ("soi", "strict order incomplete"),
+ ("toc", "tie order complete"),
+ ("toi", "tie order incomplete"),
+ ("cat", "categorical"),
# ('tog', 'tournament graph'),
# ('mjg', 'majority graph'),
# ('wmg', 'weighted majority graph'),
# ('pwg', 'pairwise graph'),
- ('wmd', 'weighted matching data'),
- ('dat', 'extra data file'),
- ('csv', 'comma-separated values')
+ ("wmd", "weighted matching data"),
+ ("dat", "extra data file"),
+ ("csv", "comma-separated values"),
]
MODIFICATIONTYPES = [
- ('original', 'original'),
- ('induced', 'induced'),
- ('imbued', 'imbued'),
- ('synthetic', 'synthetic')
+ ("original", "original"),
+ ("induced", "induced"),
+ ("imbued", "imbued"),
+ ("synthetic", "synthetic"),
]
METADATACATEGORIES = [
- ('general', 'general properties'),
- ('preference', 'preference structure'),
- ('ballot', 'ballot structure'),
- ('aggregation', 'aggregtated structure')
+ ("general", "general properties"),
+ ("preference", "preference structure"),
+ ("ballot", "ballot structure"),
+ ("aggregation", "aggregated structure"),
]
-SEARCHWIDGETS = [
- ('ternary', 'ternary choices'),
- ('range', 'range')
-]
+SEARCHWIDGETS = [("ternary", "ternary choices"), ("range", "range")]
diff --git a/preflibapp/management/commands/adddataset.py b/preflibapp/management/commands/adddataset.py
index 03f466e..c6a74ca 100644
--- a/preflibapp/management/commands/adddataset.py
+++ b/preflibapp/management/commands/adddataset.py
@@ -2,168 +2,131 @@
from django.contrib.staticfiles import finders
from django.core import management
from django.db.models import Max
+from preflibtools.instances.dataset import read_info_file
import preflibapp
from preflibapp.models import *
-from preflibtools.instances.preflibinstance import OrdinalInstance, CategoricalInstance, MatchingInstance
+from preflibtools.instances.preflibinstance import (
+ OrdinalInstance,
+ CategoricalInstance,
+ MatchingInstance,
+)
import traceback
import zipfile
import os
-def read_info_file(file_name):
- infos = {'files': {}}
- with open(file_name, 'r') as file:
- # We go line per line trying to match the beginning of the line to a known header tag
- lines = file.readlines()
- line_index = 0
- for line_index in range(len(lines)):
- line = lines[line_index]
- if len(line) > 1:
- if line.startswith('Name:'):
- infos['name'] = line[5:].strip()
- elif line.startswith('Abbreviation:'):
- infos['abb'] = line[13:].strip()
- elif line.startswith('Tags:'):
- infos['tags'] = [tag.strip() for tag in line[5:].strip().split(',')]
- elif line.startswith('Series Number:'):
- infos['series'] = line[14:].strip()
- elif line.startswith('Publication Date:'):
- infos['publication_date'] = line[17:].strip()
- elif line.startswith('Description:'):
- infos['description'] = line[12:].strip()
- elif line.startswith('Required Citations:'):
- infos['citations'] = line[19:].strip() if line[19:].strip() != "None" else ""
- elif line.startswith('Selected Studies:'):
- infos['studies'] = line[17:].strip() if line[17:].strip() != "None" else ""
- elif line.startswith('file_name, modification_type, relates_to, title, description, publication_date'):
- break
- # We are now reading the description of the files
- for line in lines[line_index + 1:]:
- line = line.strip()
- if len(line) > 0:
- split_line = line.split(',')
- new_split_line = []
- inside_quotes = False
- tmp_split = ''
- for split in split_line:
- split = split.strip()
- if len(split) > 0:
- if inside_quotes:
- if split[-3:] == '"""':
- tmp_split += split[:-3]
- new_split_line.append(tmp_split)
- inside_quotes = False
- else:
- tmp_split += split + ', '
- else:
- if split[0:3] == '"""':
- tmp_split += split[3:] + ', '
- inside_quotes = True
- else:
- new_split_line.append(split)
- else:
- new_split_line.append('')
- infos['files'][new_split_line[0].strip()] = {
- 'file_name': new_split_line[0].strip(),
- 'modification_type': new_split_line[1].strip(),
- 'relates_to': new_split_line[2].strip(),
- 'title': new_split_line[3].strip(),
- 'description': new_split_line[4].strip(),
- 'publication_date': new_split_line[5].strip()
- }
- return infos
-
-
def add_dataset(file_path, tmp_dir, data_dir, keepzip, log):
# We start by extracting the zip file
- with zipfile.ZipFile(file_path, 'r') as archive:
+ with zipfile.ZipFile(file_path, "r") as archive:
archive.extractall(tmp_dir)
# We try to read and parse the info file, if we don't find the info.txt file, we skip the dataset
if os.path.exists(os.path.join(tmp_dir, "info.txt")):
infos = read_info_file(os.path.join(tmp_dir, "info.txt"))
else:
- raise Exception("No info.txt file has been found for " + str(file_path) + " ... skipping it.")
+ raise Exception(
+ "No info.txt file has been found for "
+ + str(file_path)
+ + " ... skipping it."
+ )
# Now that we have all the infos, we can create the dataset object in the database
dataset_obj, _ = DataSet.objects.update_or_create(
- abbreviation=infos['abb'],
+ abbreviation=infos["abb"],
defaults={
- 'name': infos['name'],
- 'series_number': infos['series'],
- 'zip_file_path': None,
- 'zip_file_size': 0,
- 'description': infos['description'],
- 'required_citations': infos['citations'],
- 'selected_studies': infos['studies'],
- 'publication_date': infos['publication_date']})
+ "name": infos["name"],
+ "series_number": infos["series"],
+ "zip_file_path": None,
+ "zip_file_size": 0,
+ "description": infos["description"],
+ "required_citations": infos["citations"],
+ "selected_studies": infos["studies"],
+ "publication_date": infos["publication_date"],
+ },
+ )
# We add the tags, creating them in the database if needed
for tag in infos["tags"]:
tag_obj, _ = DataTag.objects.get_or_create(
- name=tag,
- defaults={
- 'description': 'No description provided.'
- }
+ name=tag, defaults={"description": "No description provided."}
)
dataset_obj.tags.add(tag_obj)
dataset_obj.save()
# We create a folder for the dataset in the data folder
try:
- os.makedirs(os.path.join(data_dir, infos['abb']))
+ os.makedirs(os.path.join(data_dir, infos["abb"]))
except FileExistsError:
pass
- if os.path.exists(os.path.join(data_dir, infos['abb'], "info.txt")):
- os.remove(os.path.join(data_dir, infos['abb'], "info.txt"))
- os.rename(os.path.join(tmp_dir, "info.txt"), os.path.join(data_dir, infos['abb'], "info.txt"))
+ if os.path.exists(os.path.join(data_dir, infos["abb"], "info.txt")):
+ os.remove(os.path.join(data_dir, infos["abb"], "info.txt"))
+ os.rename(
+ os.path.join(tmp_dir, "info.txt"),
+ os.path.join(data_dir, infos["abb"], "info.txt"),
+ )
# Let's now add the datafiles to the database
relates_to_dict = {}
for file_name in os.listdir(tmp_dir):
- extension = os.path.splitext(file_name)[1][1:] # Using [1:] here to remove the dot
+ extension = os.path.splitext(file_name)[1][
+ 1:
+ ] # Using [1:] here to remove the dot
# We only do it if it actually is a file we're interested in
if is_choice(DATATYPES, extension):
print("\t{}".format(file_name))
# Move the file to the folder of the dataset
- if os.path.exists(os.path.join(data_dir, infos['abb'], file_name)):
- os.remove(os.path.join(data_dir, infos['abb'], file_name))
- os.rename(os.path.join(tmp_dir, file_name), os.path.join(data_dir, infos['abb'], file_name))
+ if os.path.exists(os.path.join(data_dir, infos["abb"], file_name)):
+ os.remove(os.path.join(data_dir, infos["abb"], file_name))
+ os.rename(
+ os.path.join(tmp_dir, file_name),
+ os.path.join(data_dir, infos["abb"], file_name),
+ )
# Parsing the parsable files or looking through the infos we collected to see if the file appears there
if extension in ["soc", "soi", "toc", "toi", "cat", "wmd"]:
if extension == "cat":
- instance = CategoricalInstance(os.path.join(data_dir, infos['abb'], file_name))
+ instance = CategoricalInstance(
+ os.path.join(data_dir, infos["abb"], file_name)
+ )
elif extension == "wmd":
- instance = MatchingInstance(os.path.join(data_dir, infos['abb'], file_name))
+ instance = MatchingInstance(
+ os.path.join(data_dir, infos["abb"], file_name)
+ )
else:
- instance = OrdinalInstance(os.path.join(data_dir, infos['abb'], file_name))
+ instance = OrdinalInstance(
+ os.path.join(data_dir, infos["abb"], file_name)
+ )
file_info = {
- 'modification_type': instance.modification_type,
- 'title': instance.title,
- 'description': instance.description,
- 'relates_to': instance.relates_to,
- 'publication_date': instance.publication_date,
+ "modification_type": instance.modification_type,
+ "title": instance.title,
+ "description": instance.description,
+ "relates_to": instance.relates_to,
+ "publication_date": instance.publication_date,
}
else:
- file_info = infos['files'].get(file_name)
+ file_info = infos["files"].get(file_name)
if not file_info:
file_info = {
- 'modification_type': '-',
- 'title': '',
- 'description': '-',
- 'relates_to': '',
- 'publication_date': timezone.now(),
+ "modification_type": "-",
+ "title": "",
+ "description": "-",
+ "relates_to": "",
+ "publication_date": timezone.now(),
}
print("No info found for {}".format(file_name))
- log.append("\n\nNo info has been found for the file " + str(file_name) +
- " in the info file of " + str(file_path) + "\n")
+ log.append(
+ "\n\nNo info has been found for the file "
+ + str(file_name)
+ + " in the info file of "
+ + str(file_path)
+ + "\n"
+ )
# We can finally create (or update) the datafile object in the database
datafile_obj, _ = DataFile.objects.update_or_create(
@@ -171,15 +134,19 @@ def add_dataset(file_path, tmp_dir, data_dir, keepzip, log):
defaults={
"dataset": dataset_obj,
"data_type": os.path.splitext(file_name)[1][1:],
- "modification_type": file_info['modification_type'],
- "title": file_info['title'],
- "description": file_info['description'],
- "file_path": 'data/{}/{}'.format(infos['abb'], file_name),
- "file_size": os.path.getsize(os.path.join(data_dir, infos['abb'], file_name)),
- "publication_date": file_info['publication_date']})
-
- if file_info['relates_to']:
- relates_to_dict[datafile_obj] = file_info['relates_to']
+ "modification_type": file_info["modification_type"],
+ "title": file_info["title"],
+ "description": file_info["description"],
+ "file_path": "data/{}/{}".format(infos["abb"], file_name),
+ "file_size": os.path.getsize(
+ os.path.join(data_dir, infos["abb"], file_name)
+ ),
+ "publication_date": file_info["publication_date"],
+ },
+ )
+
+ if file_info["relates_to"]:
+ relates_to_dict[datafile_obj] = file_info["relates_to"]
for datafile, relates_to_name in relates_to_dict.items():
related_file = DataFile.objects.get(file_name=relates_to_name)
@@ -195,26 +162,34 @@ class Command(BaseCommand):
help = "Add datasets to database"
def add_arguments(self, parser):
- parser.add_argument('-d', nargs='*', type=str)
- parser.add_argument('-f', nargs='*', type=str)
- parser.add_argument('--all', action='store_true')
- parser.add_argument('--keepzip', action='store_true')
+ parser.add_argument("-d", nargs="*", type=str)
+ parser.add_argument("-f", nargs="*", type=str)
+ parser.add_argument("--all", action="store_true")
+ parser.add_argument("--keepzip", action="store_true")
def handle(self, *args, **options):
- if not options['d'] and not options['f']:
- print("ERROR: you need to pass an input argument: either -d for a directory of -f for a single file.")
+ if not options["d"] and not options["f"]:
+ print(
+ "ERROR: you need to pass an input argument: either -d for a directory or -f for a single file."
+ )
return
- if options['f']:
- for file_path in options['f']:
+ if options["f"]:
+ for file_path in options["f"]:
if os.path.splitext(file_path)[1] != ".zip":
- print("ERROR: the argument -f should point to a zip file, and {} does not look like one.".format(
- file_path))
+ print(
+ "ERROR: the argument -f should point to a zip file, and {} does not look like one.".format(
+ file_path
+ )
+ )
return
- if options['d']:
- for dir_path in options['d']:
+ if options["d"]:
+ for dir_path in options["d"]:
if not os.path.isdir(dir_path):
- print("ERROR: the argument -d should point to a directory, and {} does not look like one.".format(
- dir_path))
+ print(
+ "ERROR: the argument -d should point to a directory, and {} does not look like one.".format(
+ dir_path
+ )
+ )
return
log = []
@@ -222,7 +197,9 @@ def handle(self, *args, **options):
try:
# Initializing the log
- new_log_num = Log.objects.filter(log_type="add_dataset").aggregate(Max('log_num'))['log_num__max']
+ new_log_num = Log.objects.filter(log_type="add_dataset").aggregate(
+ Max("log_num")
+ )["log_num__max"]
if new_log_num is None:
new_log_num = 0
else:
@@ -232,7 +209,9 @@ def handle(self, *args, **options):
data_dir = finders.find("data")
if not data_dir:
try:
- data_dir = os.path.join(os.path.dirname(preflibapp.__file__), "static", "data")
+ data_dir = os.path.join(
+ os.path.dirname(preflibapp.__file__), "static", "data"
+ )
os.makedirs(data_dir)
except FileExistsError:
pass
@@ -245,38 +224,54 @@ def handle(self, *args, **options):
pass
# Starting the log
- log = [" Adding dataset #" + str(new_log_num) + " - " + str(timezone.now()) + "\n",
- "\n\t- args : " + str(args) + "\n\t- options : " + str(options) +
- "\n\n"]
+ log = [
+ " Adding dataset #"
+ + str(new_log_num)
+ + " - "
+ + str(timezone.now())
+ + "\n",
+ "\n\t- args : "
+ + str(args)
+ + "\n\t- options : "
+ + str(options)
+ + "\n\n",
+ ]
# If the option 'all' has been passed, we add all the datasets in the datatoadd folder
# to the 'file' option
- if options['d']:
- if not options['f']:
- options['f'] = []
- for dir_path in options['d']:
+ if options["d"]:
+ if not options["f"]:
+ options["f"] = []
+ for dir_path in options["d"]:
for filename in os.listdir(dir_path):
if filename.endswith(".zip"):
- options['f'].append(os.path.join(dir_path, filename))
+ options["f"].append(os.path.join(dir_path, filename))
# Starting the real stuff
log.append("Adding datasets\n\n")
start_time = timezone.now()
- for file_path in options['f']:
+ for file_path in options["f"]:
# We only consider zip files
- if os.path.splitext(file_path)[1] == '.zip':
+ if os.path.splitext(file_path)[1] == ".zip":
# Let's work on the dataset
file_name = os.path.basename(file_path)
print("Adding dataset " + str(file_name))
log.append("\n\t- Dataset " + str(file_name) + "... ")
try:
# Actually adding the dataset
- add_dataset(file_path, tmp_dir, data_dir, options['keepzip'], log)
+ add_dataset(
+ file_path, tmp_dir, data_dir, options["keepzip"], log
+ )
log.append(" ... done.\n")
except Exception as e:
# If something happened, we log it and move on
- log.append("\n\n" + str(e) + "\n" + str(traceback.format_exc()) +
- "\n")
+ log.append(
+ "\n\n"
+ + str(e)
+ + "\n"
+ + str(traceback.format_exc())
+ + "\n"
+ )
print(traceback.format_exc())
print(e)
finally:
@@ -297,13 +292,20 @@ def handle(self, *args, **options):
management.call_command("collectstatic", no_input=False)
except Exception as e:
# If anything happened during the execution, we log it and move on
- log.append("\n" + str(e) + "\n" + str(traceback.format_exc()) + "")
+ log.append(
+ "\n"
+ + str(e)
+ + "\n"
+ + str(traceback.format_exc())
+ + ""
+ )
print(traceback.format_exc())
print(e)
finally:
# In any cases, we save the log
Log.objects.create(
- log=''.join(log),
+ log="".join(log),
log_type="add_dataset",
log_num=new_log_num,
- publication_date=timezone.now())
+ publication_date=timezone.now(),
+ )
diff --git a/preflibapp/management/commands/clearcache.py b/preflibapp/management/commands/clearcache.py
index 42251e7..4f2fe90 100644
--- a/preflibapp/management/commands/clearcache.py
+++ b/preflibapp/management/commands/clearcache.py
@@ -5,4 +5,4 @@
class Command(BaseCommand):
def handle(self, *args, **kwargs):
cache.clear()
- self.stdout.write('Cache cleared\n')
+ self.stdout.write("Cache cleared\n")
diff --git a/preflibapp/management/commands/deldataset.py b/preflibapp/management/commands/deldataset.py
index 66890ae..3b31794 100644
--- a/preflibapp/management/commands/deldataset.py
+++ b/preflibapp/management/commands/deldataset.py
@@ -14,8 +14,8 @@ class Command(BaseCommand):
help = "Add datasets to database"
def add_arguments(self, parser):
- parser.add_argument('--abb', nargs='*', type=str)
- parser.add_argument('--all', action='store_true')
+ parser.add_argument("--abb", nargs="*", type=str)
+ parser.add_argument("--all", action="store_true")
def handle(self, *args, **options):
@@ -24,30 +24,43 @@ def handle(self, *args, **options):
try:
# Initializing the log
- new_log_num = Log.objects.filter(log_type="del_dataset").aggregate(Max('log_num'))['log_num__max']
+ new_log_num = Log.objects.filter(log_type="del_dataset").aggregate(
+ Max("log_num")
+ )["log_num__max"]
if new_log_num is None:
new_log_num = 0
else:
new_log_num += 1
# Starting the log
- log = [" Deleting dataset #" + str(new_log_num) + " - " + str(timezone.now()) + "\n",
- "\n\t- args : " + str(args) + "\n\t- options : " + str(options) +
- "\n\n"]
+ log = [
+ " Deleting dataset #"
+ + str(new_log_num)
+ + " - "
+ + str(timezone.now())
+ + "\n",
+ "\n\t- args : "
+ + str(args)
+ + "\n\t- options : "
+ + str(options)
+ + "\n\n",
+ ]
# Looking for the data folder
data_dir = finders.find("data")
if not data_dir:
- log.append("\nThere is no data folder in the static folder, that is weird...")
+ log.append(
+ "\nThere is no data folder in the static folder, that is weird..."
+ )
# Starting the real stuff
log.append("Deleting datasets\n\n")
start_time = timezone.now()
- if options['all']:
- options['abb'] = DataSet.objects.values_list('abbreviation', flat=True)
+ if options["all"]:
+ options["abb"] = DataSet.objects.values_list("abbreviation", flat=True)
- for abbreviation in options['abb']:
+ for abbreviation in options["abb"]:
# Get the dataset
dataset = DataSet.objects.get(abbreviation=abbreviation)
@@ -66,12 +79,19 @@ def handle(self, *args, **options):
except Exception as e:
# If anything happened during the execution, we log it and move on
- log.append("\n" + str(e) + "\n" + str(traceback.format_exc()) + "")
+ log.append(
+ "\n"
+ + str(e)
+ + "\n"
+ + str(traceback.format_exc())
+ + ""
+ )
print(traceback.format_exc())
print(e)
finally:
Log.objects.create(
- log=''.join(log),
+ log="".join(log),
log_type="del_dataset",
log_num=new_log_num,
- publication_date=timezone.now())
+ publication_date=timezone.now(),
+ )
diff --git a/preflibapp/management/commands/generatezip.py b/preflibapp/management/commands/generatezip.py
index 82b2af6..32c84db 100644
--- a/preflibapp/management/commands/generatezip.py
+++ b/preflibapp/management/commands/generatezip.py
@@ -14,7 +14,9 @@ def zip_dataset(dataset, data_dir):
# First locate the dataset folder
ds_dir = os.path.join(data_dir, dataset.abbreviation)
# Create the zip file for the dataset
- zipf = zipfile.ZipFile(os.path.join(ds_dir, dataset.abbreviation + ".zip"), "w", zipfile.ZIP_DEFLATED)
+ zipf = zipfile.ZipFile(
+ os.path.join(ds_dir, dataset.abbreviation + ".zip"), "w", zipfile.ZIP_DEFLATED
+ )
# Add all the files to the zip archive
for datafile in dataset.files.all():
@@ -27,17 +29,26 @@ def zip_dataset(dataset, data_dir):
zipf.close()
data_dir_name = os.path.basename(os.path.normpath(data_dir))
- dataset.zip_file_path = os.path.join(data_dir_name, dataset.abbreviation, dataset.abbreviation + ".zip")
- dataset.zip_file_size = os.path.getsize(os.path.join(ds_dir, dataset.abbreviation + ".zip"))
+ dataset.zip_file_path = os.path.join(
+ data_dir_name, dataset.abbreviation, dataset.abbreviation + ".zip"
+ )
+ dataset.zip_file_size = os.path.getsize(
+ os.path.join(ds_dir, dataset.abbreviation + ".zip")
+ )
dataset.save()
def zip_type(data_type, data_dir):
- if data_type in ('dat',):
+ if data_type in ("dat",):
return
- zipf = zipfile.ZipFile(os.path.join(data_dir, "types", data_type + ".zip"), "w", zipfile.ZIP_DEFLATED)
+ zipf = zipfile.ZipFile(
+ os.path.join(data_dir, "types", data_type + ".zip"), "w", zipfile.ZIP_DEFLATED
+ )
for datafile in DataFile.objects.filter(data_type=data_type):
- zipf.write(os.path.join(os.path.dirname(data_dir), datafile.file_path), datafile.file_name)
+ zipf.write(
+ os.path.join(os.path.dirname(data_dir), datafile.file_path),
+ datafile.file_name,
+ )
zipf.close()
@@ -60,14 +71,22 @@ def handle(self, *args, **options):
try:
# Initializing a new log
- new_log_num = Log.objects.filter(log_type="zip").aggregate(Max('log_num'))['log_num__max']
+ new_log_num = Log.objects.filter(log_type="zip").aggregate(Max("log_num"))[
+ "log_num__max"
+ ]
if new_log_num is None:
new_log_num = 0
else:
new_log_num += 1
# Starting the log
- log = [" Zipping log #" + str(new_log_num) + " - " + str(timezone.now()) + "\n"]
+ log = [
+ " Zipping log #"
+ + str(new_log_num)
+ + " - "
+ + str(timezone.now())
+ + "\n"
+ ]
start_time = timezone.now()
# We start by zipping the data sets
@@ -87,7 +106,11 @@ def handle(self, *args, **options):
# We actually zip the types
log.append("\nZipping data files by type\n\n")
- for data_type in DataFile.objects.order_by().values_list('data_type', flat=True).distinct():
+ for data_type in (
+ DataFile.objects.order_by()
+ .values_list("data_type", flat=True)
+ .distinct()
+ ):
print("Zipping type " + data_type)
log.append("\t- Zipping type " + data_type + "... ")
zip_type(data_type, data_dir)
@@ -96,7 +119,10 @@ def handle(self, *args, **options):
# We finish the log
log.append("\nRegeneration of the zip files successfully completed in ")
- log.append(str((timezone.now() - start_time).total_seconds() / 60) + " minutes\n")
+ log.append(
+ str((timezone.now() - start_time).total_seconds() / 60)
+ + " minutes\n"
+ )
# And finally collect the statics
print("Finished, collecting statics")
@@ -104,7 +130,13 @@ def handle(self, *args, **options):
except Exception as e:
# If anything happened, we log it and move on
- log.append("\n" + str(e) + "\n" + str(traceback.format_exc()) + "")
+ log.append(
+ "\n"
+ + str(e)
+ + "\n"
+ + str(traceback.format_exc())
+ + ""
+ )
print(traceback.format_exc())
print(e)
@@ -112,7 +144,8 @@ def handle(self, *args, **options):
# In any cases we remove the lock and save the log
os.remove(os.path.join(data_dir, "zip.lock"))
Log.objects.create(
- log=''.join(log),
+ log="".join(log),
log_type="zip",
log_num=new_log_num,
- publication_date=timezone.now())
+ publication_date=timezone.now(),
+ )
diff --git a/preflibapp/management/commands/initializedb.py b/preflibapp/management/commands/initializedb.py
index 90ca7d1..bcdcf5b 100644
--- a/preflibapp/management/commands/initializedb.py
+++ b/preflibapp/management/commands/initializedb.py
@@ -8,56 +8,56 @@ def initialize_tags():
name="Election",
defaults={
"description": "The preferences apply to scenario in which some alternatives are to be selected/elected.",
- }
+ },
)
sport_tag, _ = DataTag.objects.update_or_create(
name="Sport",
defaults={
"description": "The data represent sport events, interpreted as elections.",
- }
+ },
)
politics_tag, _ = DataTag.objects.update_or_create(
name="Politics",
defaults={
"description": "The preferences apply to political scenario.",
- }
+ },
)
politics_tag, _ = DataTag.objects.update_or_create(
name="STV",
defaults={
"description": "STV (single-transferable vote) was the voting rule used for the selection of the winner.",
- }
+ },
)
politics_tag, _ = DataTag.objects.update_or_create(
name="MTurk",
defaults={
"description": "The preferences were collected on Amazon Mechanical Turk.",
- }
+ },
)
matching_tag, _ = DataTag.objects.update_or_create(
name="Matching",
defaults={
"description": "The preferences apply to scenario in which alternatives are to be matched to one another.",
- }
+ },
)
ratings_tag, _ = DataTag.objects.update_or_create(
name="Ratings",
defaults={
"description": "The preferences express ratings about the alternatives.",
- }
+ },
)
combi_tag, _ = DataTag.objects.update_or_create(
name="Combinatorial",
defaults={
"description": "The data represent combinatorial preferences over the alternatives.",
- }
+ },
)
@@ -67,10 +67,10 @@ def initialize_metadata():
defaults={
"category": "general",
"description": "The number of alternatives is the number of elements agents had to vote on. It is only "
- "available for data representing orderings of the alternatives.",
+ "available for data representing orderings of the alternatives.",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,soi,toc,toi,cat,wmd',
+ "applies_to": "soc,soi,toc,toi,cat,wmd",
"inner_module": "preflibtools.properties.basic",
"inner_function": "num_alternatives",
"inner_type": "int",
@@ -78,17 +78,19 @@ def initialize_metadata():
"short_name": "numAlt",
"search_question": "Number of alternatives:",
"search_res_name": "#Alternatives",
- "order_priority": 1})
+ "order_priority": 1,
+ },
+ )
metadata_num_vot, _ = Metadata.objects.update_or_create(
name="Number of voters",
defaults={
"category": "general",
"description": "The number of voters is the number of ballots that were submitted. For weighted matching "
- "graphs",
+ "graphs",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,soi,toc,toi,cat,wmd',
+ "applies_to": "soc,soi,toc,toi,cat,wmd",
"inner_module": "preflibtools.properties.basic",
"inner_function": "num_voters",
"inner_type": "int",
@@ -96,7 +98,9 @@ def initialize_metadata():
"short_name": "numVot",
"search_question": "Number of voters:",
"search_res_name": "#Voters",
- "order_priority": 2})
+ "order_priority": 2,
+ },
+ )
metadata_uniq_preferences, _ = Metadata.objects.update_or_create(
name="Number of unique preferences",
@@ -105,7 +109,7 @@ def initialize_metadata():
"description": "The number of distinct preferences that were casts.",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,soi,toc,toi',
+ "applies_to": "soc,soi,toc,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "num_different_preferences",
"inner_type": "int",
@@ -113,17 +117,19 @@ def initialize_metadata():
"short_name": "numUniq",
"search_question": "Number of unique orders:",
"search_res_name": "#Unique Ballots",
- "order_priority": 4})
+ "order_priority": 4,
+ },
+ )
metadata_is_strict, _ = Metadata.objects.update_or_create(
name="Strict orders",
defaults={
"category": "preference",
"description": "A boolean value set to True if all the ballots that were cast represent strict linear "
- "orders.",
+ "orders.",
"is_active": True,
"is_displayed": False,
- "applies_to": 'soc,soi,toc,toi',
+ "applies_to": "soc,soi,toc,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "is_strict",
"inner_type": "bool",
@@ -131,17 +137,19 @@ def initialize_metadata():
"short_name": "isStrict",
"search_question": "Is strict?",
"search_res_name": "Strict",
- "order_priority": 11})
+ "order_priority": 11,
+ },
+ )
metadata_is_complete, _ = Metadata.objects.update_or_create(
name="Complete orders",
defaults={
"category": "preference",
"description": "A boolean value set to True if all the ballots that were cast represent complete linear "
- "orders.",
+ "orders.",
"is_active": True,
"is_displayed": False,
- "applies_to": 'soc,soi,toc,toi',
+ "applies_to": "soc,soi,toc,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "is_complete",
"inner_type": "bool",
@@ -149,18 +157,20 @@ def initialize_metadata():
"short_name": "isComplete",
"search_question": "Is complete?",
"search_res_name": "Complete",
- "order_priority": 12})
+ "order_priority": 12,
+ },
+ )
metadata_is_app, _ = Metadata.objects.update_or_create(
name="Approval profile",
defaults={
"category": "preference",
"description": "A boolean value set to True if the ballots can be interpreted as approval ballots. That "
- "is the case if, either every ballot consist of a single set of indifferences, or every "
- "ballots is complete and consist of two set of indifferences.",
+ "is the case if either every ballot consists of a single set of indifferences, or every "
+ "ballot is complete and consists of two sets of indifferences.",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,soi,toc,toi',
+ "applies_to": "soc,soi,toc,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "is_approval",
"inner_type": "bool",
@@ -168,7 +178,9 @@ def initialize_metadata():
"short_name": "isApproval",
"search_question": "Is an approval profile?",
"search_res_name": "Approval",
- "order_priority": 13})
+ "order_priority": 13,
+ },
+ )
metadata_is_sp, _ = Metadata.objects.update_or_create(
name="Single-peaked",
@@ -178,7 +190,7 @@ def initialize_metadata():
<a href="https://en.wikipedia.org/wiki/Single_peaked_preferences">single-peaked preferences</a>.""",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc',
+ "applies_to": "soc",
"inner_module": "preflibtools.properties.singlepeakedness",
"inner_function": "is_single_peaked",
"inner_type": "bool",
@@ -186,17 +198,19 @@ def initialize_metadata():
"short_name": "isSP",
"search_question": "Is single-peaked?",
"search_res_name": "Single-Peaked",
- "order_priority": 14})
+ "order_priority": 14,
+ },
+ )
metadata_is_sc, _ = Metadata.objects.update_or_create(
name="Single-crossing",
defaults={
"category": "preference",
"description": "A boolean value set to True if the set of ballots cast represents single-crossing "
- "preferences.",
+ "preferences.",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc',
+ "applies_to": "soc",
"inner_module": "preflibtools.properties.singlecrossing",
"inner_function": "is_single_crossing",
"inner_type": "bool",
@@ -204,7 +218,9 @@ def initialize_metadata():
"short_name": "isSC",
"search_question": "Is single-crossing?",
"search_res_name": "Single-Crossing",
- "order_priority": 15})
+ "order_priority": 15,
+ },
+ )
metadata_larg_ballot, _ = Metadata.objects.update_or_create(
name="Size of the largest ballot",
@@ -216,7 +232,7 @@ def initialize_metadata():
of alternatives.""",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,soi,toc,toi',
+ "applies_to": "soc,soi,toc,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "largest_ballot",
"inner_type": "int",
@@ -224,7 +240,9 @@ def initialize_metadata():
"short_name": "largestBallot",
"search_question": "Size of the largest ballot:",
"search_res_name": "Largest Ballot",
- "order_priority": 6})
+ "order_priority": 6,
+ },
+ )
metadata_smal_ballot, _ = Metadata.objects.update_or_create(
name="Size of the smallest ballot",
@@ -236,7 +254,7 @@ def initialize_metadata():
of alternatives.""",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,soi,toc,toi',
+ "applies_to": "soc,soi,toc,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "smallest_ballot",
"inner_type": "int",
@@ -244,7 +262,9 @@ def initialize_metadata():
"short_name": "smallestBallot",
"search_question": "Size of the smallest ballot:",
"search_res_name": "Smallest Ballot",
- "order_priority": 5})
+ "order_priority": 5,
+ },
+ )
metadata_max_num_indif, _ = Metadata.objects.update_or_create(
name="Maximum number of indifferences",
@@ -256,7 +276,7 @@ def initialize_metadata():
href="/format#soc">SOC and SOI), this should be 0.""",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,soi,toc,toi',
+ "applies_to": "soc,soi,toc,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "max_num_indif",
"inner_type": "int",
@@ -264,7 +284,9 @@ def initialize_metadata():
"short_name": "maxNumIndif",
"search_question": "Maximum number of indifferences:",
"search_res_name": "Max #Indif.",
- "order_priority": 8})
+ "order_priority": 8,
+ },
+ )
metadata_min_num_indif, _ = Metadata.objects.update_or_create(
name="Minimum number of indifferences",
@@ -276,7 +298,7 @@ def initialize_metadata():
href="/format#soc">SOC and SOI), this should be 0.""",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,soi,toc,toi',
+ "applies_to": "soc,soi,toc,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "min_num_indif",
"inner_type": "int",
@@ -284,7 +306,9 @@ def initialize_metadata():
"short_name": "minNumIndif",
"search_question": "Minimum number of indifferences:",
"search_res_name": "Min #Indif.",
- "order_priority": 7})
+ "order_priority": 7,
+ },
+ )
metadata_larg_indif, _ = Metadata.objects.update_or_create(
name="Size of the largest indifference",
@@ -296,7 +320,7 @@ def initialize_metadata():
href="/format#soc">SOC and SOI), this should be 0.""",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,soi,toc,toi',
+ "applies_to": "soc,soi,toc,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "largest_indif",
"inner_type": "int",
@@ -304,7 +328,9 @@ def initialize_metadata():
"short_name": "largestIndif",
"search_question": "Size of the largest indifference:",
"search_res_name": "Largest Indif.",
- "order_priority": 10})
+ "order_priority": 10,
+ },
+ )
metadata_smal_indif, _ = Metadata.objects.update_or_create(
name="Size of the smallest indifference",
@@ -316,7 +342,7 @@ def initialize_metadata():
href="/format#soc">SOC and SOI), this should be 0.""",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,soi,toc,toi',
+ "applies_to": "soc,soi,toc,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "smallest_indif",
"inner_type": "int",
@@ -324,7 +350,9 @@ def initialize_metadata():
"short_name": "smallestIndif",
"search_question": "Size of the smallest indifference:",
"search_res_name": "Smallest Indif.",
- "order_priority": 9})
+ "order_priority": 9,
+ },
+ )
metadata_condorcet, _ = Metadata.objects.update_or_create(
name="Condorcet winner",
@@ -334,7 +362,7 @@ def initialize_metadata():
<a href="https://en.wikipedia.org/wiki/Condorcet_criterion">Condorcet winner</a>.""",
"is_active": True,
"is_displayed": True,
- "applies_to": 'soc,toc,soi,toi',
+ "applies_to": "soc,toc,soi,toi",
"inner_module": "preflibtools.properties.basic",
"inner_function": "has_condorcet",
"inner_type": "bool",
@@ -342,7 +370,9 @@ def initialize_metadata():
"short_name": "hasCondorcet",
"search_question": "Has a Condorcet winner?",
"search_res_name": "Condorcet",
- "order_priority": 16})
+ "order_priority": 16,
+ },
+ )
metadata_num_alt.upper_bounds.set([])
metadata_num_alt.upper_bounds.add(metadata_smal_ballot)
diff --git a/preflibapp/management/commands/updatemetadata.py b/preflibapp/management/commands/updatemetadata.py
index 87efc92..0683498 100644
--- a/preflibapp/management/commands/updatemetadata.py
+++ b/preflibapp/management/commands/updatemetadata.py
@@ -30,9 +30,11 @@ def update_dataprop(pk_log_meta):
datafile=datafile,
metadata=m,
defaults={
- "value": getattr(importlib.import_module(m.inner_module), m.inner_function)(
- preflib_instance)
- })
+ "value": getattr(
+ importlib.import_module(m.inner_module), m.inner_function
+ )(preflib_instance)
+ },
+ )
dataprop_obj.save()
log.append(" ... done. \n")
@@ -41,15 +43,16 @@ class Command(BaseCommand):
help = "Update the metadata of the data file"
def add_arguments(self, parser):
- parser.add_argument('--abb', nargs='*', type=str)
- parser.add_argument('--all', action='store_true')
- parser.add_argument('--meta', nargs='*', type=str)
+ parser.add_argument("--abb", nargs="*", type=str)
+ parser.add_argument("--all", action="store_true")
+ parser.add_argument("--meta", nargs="*", type=str)
def handle(self, *args, **options):
- if not options['all'] and not options['abb']:
+ if not options["all"] and not options["abb"]:
print(
"ERROR: you need to pass at least one dataset to write (with option --abb DATASET_ABBREVIATION) or "
- "the option --all.")
+ "the option --all."
+ )
return
# Check if there is directory "data" exists in the statics
@@ -63,25 +66,37 @@ def handle(self, *args, **options):
try:
# Initialize a new log
- new_log_num = Log.objects.filter(log_type="metadata").aggregate(Max('log_num'))['log_num__max']
+ new_log_num = Log.objects.filter(log_type="metadata").aggregate(
+ Max("log_num")
+ )["log_num__max"]
if new_log_num is None:
new_log_num = 0
else:
new_log_num += 1
# Either the datasets have been specified or we run through all of them
- if options['all']:
- options['abb'] = DataSet.objects.values_list('abbreviation', flat=True)
+ if options["all"]:
+ options["abb"] = DataSet.objects.values_list("abbreviation", flat=True)
- datafiles = DataFile.objects.filter(dataset__abbreviation__in=options["abb"]).annotate(num_props=Count('metadata')).order_by('num_props')
+ datafiles = (
+ DataFile.objects.filter(dataset__abbreviation__in=options["abb"])
+ .annotate(num_props=Count("metadata"))
+ .order_by("num_props")
+ )
metadata = Metadata.objects.filter(is_active=True)
- if options['meta']:
- metadata = metadata.filter(short_name__in=options['meta'])
- print("Only considering {}".format(options['meta']))
+ if options["meta"]:
+ metadata = metadata.filter(short_name__in=options["meta"])
+ print("Only considering {}".format(options["meta"]))
# Starting the real stuff
- log = [" Updating the metadata #" + str(new_log_num) + " - " + str(timezone.now()) + "\n"]
+ log = [
+ " Updating the metadata #"
+ + str(new_log_num)
+ + " - "
+ + str(timezone.now())
+ + "\n"
+ ]
multiproc = False
start_time = timezone.now()
if multiproc:
@@ -98,7 +113,10 @@ def handle(self, *args, **options):
# Closing the log
log.append("\nMetadata updated in ")
- log.append(str((timezone.now() - start_time).total_seconds() / 60) + " minutes
\n")
+ log.append(
+ str((timezone.now() - start_time).total_seconds() / 60)
+ + " minutes\n"
+ )
# Collecting statics at the end
print("Finished, collecting statics")
@@ -106,14 +124,21 @@ def handle(self, *args, **options):
except Exception as e:
# If an exception occured during runtime, we log it and continue
- log.append("\n\n" + str(e) + "\n" + str(traceback.format_exc()) + "")
+ log.append(
+ "\n\n"
+ + str(e)
+ + "\n"
+ + str(traceback.format_exc())
+ + ""
+ )
print(traceback.format_exc())
print("Exception " + str(e))
finally:
# In any cases, we save the log
Log.objects.create(
- log=''.join(log),
+ log="".join(log),
log_type="metadata",
log_num=new_log_num,
- publication_date=timezone.now())
+ publication_date=timezone.now(),
+ )
diff --git a/preflibapp/management/commands/updatepapers.py b/preflibapp/management/commands/updatepapers.py
index d5d3f31..a24e958 100644
--- a/preflibapp/management/commands/updatepapers.py
+++ b/preflibapp/management/commands/updatepapers.py
@@ -19,14 +19,22 @@ def handle(self, *args, **options):
try:
# Initializing a new log
- new_log_num = Log.objects.filter(log_type="papers").aggregate(Max('log_num'))['log_num__max']
+ new_log_num = Log.objects.filter(log_type="papers").aggregate(
+ Max("log_num")
+ )["log_num__max"]
if new_log_num is None:
new_log_num = 0
else:
new_log_num += 1
# Starting the log
- log = [" Updating the list of papers #" + str(new_log_num) + " - " + str(timezone.now()) + "\n"]
+ log = [
+ " Updating the list of papers #"
+ + str(new_log_num)
+ + " - "
+ + str(timezone.now())
+ + "\n"
+ ]
# We start by emptying the Paper table
Paper.objects.all().delete()
@@ -37,8 +45,8 @@ def handle(self, *args, **options):
log.append("Reading bib file\n\n")
# Some regexpr that will be used to parse the bib file
- field_regex = re.compile(r'\b(?P\w+)={(?P[^}]+)}')
- name_regex = re.compile(r'@article{(?P.+),')
+ field_regex = re.compile(r"\b(?P\w+)={(?P[^}]+)}")
+ name_regex = re.compile(r"@article{(?P.+),")
# Parsing the bib file
reading_paper = False
@@ -47,13 +55,13 @@ def handle(self, *args, **options):
num_parenthesis = 0
for line in file.readlines():
for char in line:
- if char == '@':
+ if char == "@":
reading_paper = True
in_at = True
- elif char == '{':
+ elif char == "{":
in_at = False
num_parenthesis += 1
- elif char == '}':
+ elif char == "}":
num_parenthesis -= 1
if reading_paper:
paper_block += char
@@ -72,9 +80,14 @@ def handle(self, *args, **options):
authors=paper_dict["author"],
publisher=paper_dict["journal"],
year=paper_dict["year"],
- url=paper_dict["url"])
+ url=paper_dict["url"],
+ )
- log.append("\t- Created entry for " + paper_dict["name"] + "\n")
+ log.append(
+ "\t- Created entry for "
+ + paper_dict["name"]
+ + "\n"
+ )
paper_block = ""
# We close the log
@@ -83,13 +96,20 @@ def handle(self, *args, **options):
except Exception as e:
# If something happend, we log it and move on
- log.append("" + str(e) + "\n" + str(traceback.format_exc()) + "")
+ log.append(
+ ""
+ + str(e)
+ + "\n"
+ + str(traceback.format_exc())
+ + ""
+ )
print(e)
print(traceback.format_exc())
finally:
# In any cases we add the log to the database
Log.objects.create(
- log=''.join(log),
+ log="".join(log),
log_type="papers",
log_num=new_log_num,
- publication_date=timezone.now())
+ publication_date=timezone.now(),
+ )
diff --git a/preflibapp/management/commands/writedataset.py b/preflibapp/management/commands/writedataset.py
index 63532ea..025633e 100644
--- a/preflibapp/management/commands/writedataset.py
+++ b/preflibapp/management/commands/writedataset.py
@@ -14,23 +14,26 @@ class Command(BaseCommand):
help = "Add datasets to database"
def add_arguments(self, parser):
- parser.add_argument('-d', type=str, required=True)
- parser.add_argument('--abb', nargs='*', type=str)
- parser.add_argument('--all', action='store_true')
+ parser.add_argument("-d", type=str, required=True)
+ parser.add_argument("--abb", nargs="*", type=str)
+ parser.add_argument("--all", action="store_true")
def handle(self, *args, **options):
- if not options['d']:
- print("ERROR: you need to pass a target directory as argument (with option -d path/to/your/dic).")
+ if not options["d"]:
+ print(
+ "ERROR: you need to pass a target directory as argument (with option -d path/to/your/dic)."
+ )
return
else:
- if not os.path.isdir(options['d']):
- print("ERROR: {} is not a directory.".format(options['d']))
+ if not os.path.isdir(options["d"]):
+ print("ERROR: {} is not a directory.".format(options["d"]))
return
- if not options['all'] and not options['abb']:
+ if not options["all"] and not options["abb"]:
print(
"ERROR: you need to pass at least one dataset to write (with option --abb DATASET_ABBREVIATION) or "
- "the option --all.")
+ "the option --all."
+ )
return
log = []
@@ -38,30 +41,43 @@ def handle(self, *args, **options):
try:
# Initializing the log
- new_log_num = Log.objects.filter(log_type="write_dataset").aggregate(Max('log_num'))['log_num__max']
+ new_log_num = Log.objects.filter(log_type="write_dataset").aggregate(
+ Max("log_num")
+ )["log_num__max"]
if new_log_num is None:
new_log_num = 0
else:
new_log_num += 1
# Starting the log
- log = [" Writing dataset #" + str(new_log_num) + " - " + str(timezone.now()) + "\n",
- "\n\t- args : " + str(args) + "\n\t- options : " + str(options) +
- "\n\n"]
+ log = [
+ " Writing dataset #"
+ + str(new_log_num)
+ + " - "
+ + str(timezone.now())
+ + "\n",
+ "\n\t- args : "
+ + str(args)
+ + "\n\t- options : "
+ + str(options)
+ + "\n\n",
+ ]
# Looking for the data folder
data_dir = finders.find("data")
if not data_dir:
- log.append("\nThere is no data folder in the static folder, that is weird...")
+ log.append(
+ "\nThere is no data folder in the static folder, that is weird..."
+ )
# Starting the real stuff
log.append("Writing datasets\n\n")
start_time = timezone.now()
- if options['all']:
- options['abb'] = DataSet.objects.values_list('abbreviation', flat=True)
+ if options["all"]:
+ options["abb"] = DataSet.objects.values_list("abbreviation", flat=True)
- for abbreviation in options['abb']:
+ for abbreviation in options["abb"]:
log.append("\t- {}\n".format(abbreviation))
@@ -69,7 +85,10 @@ def handle(self, *args, **options):
dataset = DataSet.objects.get(abbreviation=abbreviation)
# Creating the folder for the dataset, if it already exists, we delete the content
- ds_dir = os.path.join(options['d'], "{} - {}".format(dataset.series_number, dataset.abbreviation))
+ ds_dir = os.path.join(
+ options["d"],
+ "{} - {}".format(dataset.series_number, dataset.abbreviation),
+ )
try:
os.makedirs(ds_dir)
except FileExistsError:
@@ -81,30 +100,41 @@ def handle(self, *args, **options):
# Copy the data files to the folder
for datafile in dataset.files.all():
- shutil.copyfile(os.path.join(data_dir, datafile.file_path),
- os.path.join(ds_dir, datafile.file_name.split('-')[2]))
+ shutil.copyfile(
+ os.path.join(data_dir, datafile.file_path),
+ os.path.join(ds_dir, datafile.file_name.split("-")[2]),
+ )
# Finalizing the log
- log.append("
\nThe datasets have been successfully written. It took ")
+ log.append(
+ "
\nThe datasets have been successfully written. It took "
+ )
log.append(str((timezone.now() - start_time).total_seconds() / 60))
log.append(" minutes.
")
except Exception as e:
# If anything happened during the execution, we log it and move on
- log.append("\n" + str(e) + "\n" + str(traceback.format_exc()) + "")
+ log.append(
+ "\n"
+ + str(e)
+ + "\n"
+ + str(traceback.format_exc())
+ + ""
+ )
print(traceback.format_exc())
print(e)
finally:
Log.objects.create(
- log=''.join(log),
+ log="".join(log),
log_type="write_dataset",
log_num=new_log_num,
- publication_date=timezone.now())
+ publication_date=timezone.now(),
+ )
@staticmethod
def write_info_file(dataset, ds_dir):
def escape_comas(string):
- if ',' in string:
+ if "," in string:
return "'" + string + "'"
return string
@@ -112,19 +142,29 @@ def escape_comas(string):
# File Header
f.write("Name: {}\n\n".format(dataset.name))
f.write("Abbreviation: {}\n\n".format(dataset.abbreviation))
- f.write("Tags: {}\n\n".format(', '.join([tag.name for tag in dataset.tags.all()])))
+ f.write(
+ "Tags: {}\n\n".format(
+ ", ".join([tag.name for tag in dataset.tags.all()])
+ )
+ )
f.write("Series Number: {}\n\n".format(dataset.series_number))
f.write("Publication Date: {}\n\n".format(dataset.publication_date))
f.write("Description: {}\n\n".format(dataset.description))
f.write("Required Citations: {}\n\n".format(dataset.required_citations))
f.write("Selected Studies: {}\n\n".format(dataset.selected_studies))
- f.write("file_name, modification_type, relates_to, title, description, publication_date\n")
+ f.write(
+ "file_name, modification_type, relates_to, title, description, publication_date\n"
+ )
for data_file in dataset.files.all():
- f.write("{}, {}, {}, {}, {}, {}\n".format(data_file.file_name,
- data_file.modification_type,
- '' if data_file.relates_to is None else data_file.relates_to,
- escape_comas(data_file.title),
- escape_comas(data_file.description),
- data_file.publication_date))
+ f.write(
+ "{}, {}, {}, {}, {}, {}\n".format(
+ data_file.file_name,
+ data_file.modification_type,
+ "" if data_file.relates_to is None else data_file.relates_to,
+ escape_comas(data_file.title),
+ escape_comas(data_file.description),
+ data_file.publication_date,
+ )
+ )
f.close()
diff --git a/preflibapp/management/commands/writenewdataset.py b/preflibapp/management/commands/writenewdataset.py
index 9acdd27..a0cac31 100644
--- a/preflibapp/management/commands/writenewdataset.py
+++ b/preflibapp/management/commands/writenewdataset.py
@@ -16,23 +16,26 @@ class Command(BaseCommand):
help = "Add datasets to database"
def add_arguments(self, parser):
- parser.add_argument('-d', type=str, required=True)
- parser.add_argument('--abb', nargs='*', type=str)
- parser.add_argument('--all', action='store_true')
+ parser.add_argument("-d", type=str, required=True)
+ parser.add_argument("--abb", nargs="*", type=str)
+ parser.add_argument("--all", action="store_true")
def handle(self, *args, **options):
- if not options['d']:
- print("ERROR: you need to pass a target directory as argument (with option -d path/to/your/dic).")
+ if not options["d"]:
+ print(
+ "ERROR: you need to pass a target directory as argument (with option -d path/to/your/dic)."
+ )
return
else:
- if not os.path.isdir(options['d']):
- print("ERROR: {} is not a directory.".format(options['d']))
+ if not os.path.isdir(options["d"]):
+ print("ERROR: {} is not a directory.".format(options["d"]))
return
- if not options['all'] and not options['abb']:
+ if not options["all"] and not options["abb"]:
print(
"ERROR: you need to pass at least one dataset to write (with option --abb DATASET_ABBREVIATION) or "
- "the option --all.")
+ "the option --all."
+ )
return
log = []
@@ -40,30 +43,43 @@ def handle(self, *args, **options):
try:
# Initializing the log
- new_log_num = Log.objects.filter(log_type="write_dataset").aggregate(Max('log_num'))['log_num__max']
+ new_log_num = Log.objects.filter(log_type="write_dataset").aggregate(
+ Max("log_num")
+ )["log_num__max"]
if new_log_num is None:
new_log_num = 0
else:
new_log_num += 1
# Starting the log
- log = [" Writing dataset #" + str(new_log_num) + " - " + str(timezone.now()) + "\n",
- "\n\t- args : " + str(args) + "\n\t- options : " + str(options) +
- "\n\n"]
+ log = [
+ " Writing dataset #"
+ + str(new_log_num)
+ + " - "
+ + str(timezone.now())
+ + "\n",
+ "\n\t- args : "
+ + str(args)
+ + "\n\t- options : "
+ + str(options)
+ + "\n\n",
+ ]
# Looking for the data folder
data_dir = finders.find("data")
if not data_dir:
- log.append("\nThere is no data folder in the static folder, that is weird...")
+ log.append(
+ "\nThere is no data folder in the static folder, that is weird..."
+ )
# Starting the real stuff
log.append("Writing datasets\n\n")
start_time = timezone.now()
- if options['all']:
- options['abb'] = DataSet.objects.values_list('abbreviation', flat=True)
+ if options["all"]:
+ options["abb"] = DataSet.objects.values_list("abbreviation", flat=True)
- for abbreviation in options['abb']:
+ for abbreviation in options["abb"]:
log.append("\t- {}\n".format(abbreviation))
@@ -71,7 +87,10 @@ def handle(self, *args, **options):
dataset = DataSet.objects.get(abbreviation=abbreviation)
# Creating the folder for the dataset, if it already exists, we delete the content
- ds_dir = os.path.join(options['d'], "{} - {}".format(dataset.series_number, dataset.abbreviation))
+ ds_dir = os.path.join(
+ options["d"],
+ "{} - {}".format(dataset.series_number, dataset.abbreviation),
+ )
try:
os.makedirs(ds_dir)
except FileExistsError:
@@ -86,26 +105,35 @@ def handle(self, *args, **options):
self.write_datafile(datafile, ds_dir)
# Finalizing the log
- log.append("
\nThe datasets have been successfully written. It took ")
+ log.append(
+ "
\nThe datasets have been successfully written. It took "
+ )
log.append(str((timezone.now() - start_time).total_seconds() / 60))
log.append(" minutes.
")
except Exception as e:
# If anything happened during the execution, we log it and move on
-            log.append("\n" + str(e) + "\n" + str(traceback.format_exc()))
+            log.append(
+                "\n"
+                + str(e)
+                + "\n"
+                + str(traceback.format_exc())
+            )
print(traceback.format_exc())
print(e)
finally:
Log.objects.create(
- log=''.join(log),
+ log="".join(log),
log_type="write_dataset",
log_num=new_log_num,
- publication_date=timezone.now())
+ publication_date=timezone.now(),
+ )
@staticmethod
def write_info_file(dataset, ds_dir):
def escape_comas(string):
- if ',' in string:
+ if "," in string:
return "'" + string + "'"
return string
@@ -113,38 +141,62 @@ def escape_comas(string):
# File Header
f.write("Name: {}\n\n".format(dataset.name))
f.write("Abbreviation: {}\n\n".format(dataset.abbreviation))
- f.write("Tags: {}\n\n".format(', '.join([tag.name for tag in dataset.tags.all()])))
+ f.write(
+ "Tags: {}\n\n".format(
+ ", ".join([tag.name for tag in dataset.tags.all()])
+ )
+ )
f.write("Series Number: {}\n\n".format(dataset.series_number))
f.write("Publication Date: {}\n\n".format(dataset.publication_date))
f.write("Description: {}\n\n".format(dataset.description))
f.write("Required Citations: {}\n\n".format(dataset.required_citations))
f.write("Selected Studies: {}\n\n".format(dataset.selected_studies))
- f.write("file_name, modification_type, relates_to, title, description, publication_date\n")
+ f.write(
+ "file_name, modification_type, relates_to, title, description, publication_date\n"
+ )
for data_file in dataset.files.all():
- f.write("{}, {}, {}, {}, {}, {}\n".format(data_file,
- data_file.modification_type,
- '' if data_file.relates_to is None else data_file.relates_to,
- escape_comas(data_file.title),
- escape_comas(data_file.description),
- data_file.publication_date))
+ f.write(
+ "{}, {}, {}, {}, {}, {}\n".format(
+ data_file,
+ data_file.modification_type,
+ "" if data_file.relates_to is None else data_file.relates_to,
+ escape_comas(data_file.title),
+ escape_comas(data_file.description),
+ data_file.publication_date,
+ )
+ )
f.close()
@staticmethod
def write_datafile(datafile, ds_dir):
- if datafile.data_type in ['soc', 'soi', 'toc', 'toi']:
- instance = OrdinalInstance(os.path.join(os.path.dirname(preflibapp.__file__), "static", datafile.file_path))
- elif datafile.data_type == 'wmd':
- instance = MatchingInstance(os.path.join(os.path.dirname(preflibapp.__file__), "static", datafile.file_path))
+ if datafile.data_type in ["soc", "soi", "toc", "toi"]:
+ instance = OrdinalInstance(
+ os.path.join(
+ os.path.dirname(preflibapp.__file__), "static", datafile.file_path
+ )
+ )
+ elif datafile.data_type == "wmd":
+ instance = MatchingInstance(
+ os.path.join(
+ os.path.dirname(preflibapp.__file__), "static", datafile.file_path
+ )
+ )
else:
- shutil.copy(os.path.join(os.path.dirname(preflibapp.__file__), "static", datafile.file_path),
- os.path.join(ds_dir, datafile.file_name))
+ shutil.copy(
+ os.path.join(
+ os.path.dirname(preflibapp.__file__), "static", datafile.file_path
+ ),
+ os.path.join(ds_dir, datafile.file_name),
+ )
return
instance.modification_type = datafile.modification_type
if datafile.relates_to:
instance.relates_to = datafile.relates_to.file_name
if datafile.related_files.all():
- instance.related_files = ','.join([df.file_name for df in datafile.related_files.all()])
+ instance.related_files = ",".join(
+ [df.file_name for df in datafile.related_files.all()]
+ )
instance.title = datafile.title
instance.description = datafile.description
instance.publication_date = str(datafile.publication_date)
diff --git a/preflibapp/models.py b/preflibapp/models.py
index 801a75e..fdfc293 100644
--- a/preflibapp/models.py
+++ b/preflibapp/models.py
@@ -8,39 +8,40 @@
# ================================
-# Models related to the data
+# Models related to the data
# ================================
+
class DataTag(models.Model):
- name = models.CharField(max_length=30,
- unique=True,
- verbose_name="name")
- description = models.TextField(
- verbose_name="Description of the tag")
+ name = models.CharField(max_length=30, unique=True, verbose_name="name")
+ description = models.TextField(verbose_name="Description of the tag")
class Meta:
- ordering = ['name']
+ ordering = ["name"]
def __str__(self):
return self.name
class DataSet(models.Model):
- name = models.CharField(max_length=50,
- unique=True,
- verbose_name="name")
- abbreviation = models.SlugField(max_length=30,
- unique=True,
- verbose_name="abbreviation of the dataset")
- series_number = models.SlugField(unique=True,
- verbose_name="series number of the dataset")
+ name = models.CharField(max_length=50, unique=True, verbose_name="name")
+ abbreviation = models.SlugField(
+ max_length=30, unique=True, verbose_name="abbreviation of the dataset"
+ )
+ series_number = models.SlugField(
+ unique=True, verbose_name="series number of the dataset"
+ )
zip_file_path = models.CharField(max_length=255, blank=True, null=True, unique=True)
zip_file_size = models.FloatField(default=0)
- description = models.TextField(blank=True, verbose_name="description of the dataset")
- tags = models.ManyToManyField(DataTag,
- blank=True,
- verbose_name="tags appliying to the dataset")
- required_citations = models.TextField(blank=True, verbose_name="HTML code describing the required citations")
+ description = models.TextField(
+ blank=True, verbose_name="description of the dataset"
+ )
+ tags = models.ManyToManyField(
+ DataTag, blank=True, verbose_name="tags applying to the dataset"
+ )
+ required_citations = models.TextField(
+ blank=True, verbose_name="HTML code describing the required citations"
+ )
selected_studies = models.TextField(blank=True)
publication_date = models.DateField()
modification_date = models.DateField()
@@ -50,32 +51,27 @@ def save(self, *args, **kwargs):
return super(DataSet, self).save(*args, **kwargs)
class Meta:
- ordering = ('series_number',)
+ ordering = ("series_number",)
def __str__(self):
return self.series_number + " - " + self.abbreviation
class Metadata(models.Model):
- name = models.CharField(max_length=100,
- unique=True)
- short_name = models.CharField(max_length=100,
- unique=True)
- category = models.CharField(choices=METADATACATEGORIES,
- max_length=100)
+ name = models.CharField(max_length=100, unique=True)
+ short_name = models.CharField(max_length=100, unique=True)
+ category = models.CharField(choices=METADATACATEGORIES, max_length=100)
description = models.TextField()
is_active = models.BooleanField()
is_displayed = models.BooleanField()
applies_to = models.CharField(max_length=100)
- upper_bounds = models.ManyToManyField('self',
- symmetrical=False,
- related_name="upperBoundedBy",
- blank=True)
+ upper_bounds = models.ManyToManyField(
+ "self", symmetrical=False, related_name="upperBoundedBy", blank=True
+ )
inner_module = models.CharField(max_length=100)
inner_function = models.CharField(max_length=100)
inner_type = models.CharField(max_length=100)
- search_widget = models.CharField(choices=SEARCHWIDGETS,
- max_length=100)
+ search_widget = models.CharField(choices=SEARCHWIDGETS, max_length=100)
search_question = models.TextField()
search_res_name = models.CharField(max_length=100)
order_priority = models.IntegerField()
@@ -84,33 +80,27 @@ class Meta:
ordering = ["order_priority", "name"]
def applies_to_list(self):
- return self.applies_to.split(',')
+ return self.applies_to.split(",")
def __str__(self):
return self.name
class DataFile(models.Model):
- dataset = models.ForeignKey(DataSet,
- on_delete=models.CASCADE,
- related_name='files')
- file_name = models.CharField(max_length=100,
- unique=True)
- data_type = models.CharField(choices=DATATYPES,
- max_length=5)
- metadata = models.ManyToManyField(Metadata,
- through="DataProperty",
- related_name="files")
- modification_type = models.CharField(choices=MODIFICATIONTYPES,
- max_length=20)
+ dataset = models.ForeignKey(DataSet, on_delete=models.CASCADE, related_name="files")
+ file_name = models.CharField(max_length=100, unique=True)
+ data_type = models.CharField(choices=DATATYPES, max_length=5)
+ metadata = models.ManyToManyField(
+ Metadata, through="DataProperty", related_name="files"
+ )
+ modification_type = models.CharField(choices=MODIFICATIONTYPES, max_length=20)
title = models.CharField(max_length=100, blank=True)
description = models.TextField(blank=True)
file_path = models.CharField(max_length=255, blank=True, unique=True)
file_size = models.FloatField(default=0)
- relates_to = models.ForeignKey('DataFile',
- on_delete=models.CASCADE,
- related_name='related_files',
- null=True)
+ relates_to = models.ForeignKey(
+ "DataFile", on_delete=models.CASCADE, related_name="related_files", null=True
+ )
publication_date = models.DateField()
modification_date = models.DateField()
@@ -119,20 +109,18 @@ def save(self, *args, **kwargs):
return super(DataFile, self).save(*args, **kwargs)
class Meta:
- ordering = ['file_name']
+ ordering = ["file_name"]
def short_name(self):
- return self.file_name.split('.')[0]
+ return self.file_name.split(".")[0]
def __str__(self):
return self.file_name
class DataProperty(models.Model):
- datafile = models.ForeignKey(DataFile,
- on_delete=models.CASCADE)
- metadata = models.ForeignKey(Metadata,
- on_delete=models.CASCADE)
+ datafile = models.ForeignKey(DataFile, on_delete=models.CASCADE)
+ metadata = models.ForeignKey(Metadata, on_delete=models.CASCADE)
value = models.CharField(max_length=100)
def typed_value(self):
@@ -149,12 +137,12 @@ def __str__(self):
# ===================================
-# Papers that are using PrefLib
+# Papers that are using PrefLib
# ===================================
+
class Paper(models.Model):
- name = models.CharField(max_length=50,
- unique=True)
+ name = models.CharField(max_length=50, unique=True)
title = models.TextField()
authors = models.TextField()
publisher = models.TextField()
@@ -162,16 +150,17 @@ class Paper(models.Model):
url = models.URLField(max_length=100)
class Meta:
- ordering = ['-year', 'title']
+ ordering = ["-year", "title"]
def __str__(self):
- return self.authors.split(' ')[1] + "_" + str(self.year)
+ return self.authors.split(" ")[1] + "_" + str(self.year)
# ==============================
-# Logs for the admin tasks
+# Logs for the admin tasks
# ==============================
+
class Log(models.Model):
log = models.TextField()
log_type = models.CharField(max_length=50)
@@ -179,8 +168,14 @@ class Log(models.Model):
publication_date = models.DateTimeField()
class Meta:
- ordering = ['-publication_date']
- unique_together = ('log_type', 'log_num')
+ ordering = ["-publication_date"]
+ unique_together = ("log_type", "log_num")
def __str__(self):
- return self.log_type + " #" + str(self.log_num) + " - " + str(self.publication_date)
+ return (
+ self.log_type
+ + " #"
+ + str(self.log_num)
+ + " - "
+ + str(self.publication_date)
+ )
diff --git a/preflibapp/urls.py b/preflibapp/urls.py
index 9f93d2a..d317d7f 100644
--- a/preflibapp/urls.py
+++ b/preflibapp/urls.py
@@ -7,25 +7,53 @@
def get_all_dataset_num():
for ds in DataSet.objects.all():
- yield {'dataset_num': ds.series_number}
+ yield {"dataset_num": ds.series_number}
-app_name = 'preflibapp'
+app_name = "preflibapp"
urlpatterns = [
- distill_path('', views.main, name='main', distill_file='index.html'),
- distill_re_path(r'^format/?$', views.data_format, name='data-format', distill_file="format.html"),
-
- distill_re_path(r'^datasets/?$', views.all_datasets, name='all-datasets', distill_file="datasets.html"),
-    distill_re_path(r'^dataset/(?P<dataset_num>[0-9]{5})/?$', views.dataset_view, name='dataset', distill_func=get_all_dataset_num, distill_file="dataset/{dataset_num}.html"),
-
- distill_re_path(r'^data/search/?$', views.data_search, name="data-search"),
-
- distill_re_path(r'^BoSc22/?$', views.boehmer_schaar, name="boehmer-schaar", distill_file="BoSc22.html"),
-
- distill_re_path(r'^tools/ivs/?$', views.tools_IVS, name='tools-IVS', distill_file="tools/ivs.html"),
- distill_re_path(r'^tools/kdg/?$', views.tools_KDG, name='tools-KDG', distill_file="tools/kdg.html"),
- distill_re_path(r'^tools/cris/?$', views.tools_CRIS, name='tools-CRIS', distill_file="tools/cris.html"),
-
- re_path(r'^login/?$', views.user_login, name='login'),
- re_path(r'^logout/?$', views.user_logout, name='logout'),
+ distill_path("", views.main, name="main", distill_file="index.html"),
+ distill_re_path(
+ r"^format/?$", views.data_format, name="data-format", distill_file="format.html"
+ ),
+ distill_re_path(
+ r"^datasets/?$",
+ views.all_datasets,
+ name="all-datasets",
+ distill_file="datasets.html",
+ ),
+ distill_re_path(
+        r"^dataset/(?P<dataset_num>[0-9]{5})/?$",
+ views.dataset_view,
+ name="dataset",
+ distill_func=get_all_dataset_num,
+ distill_file="dataset/{dataset_num}.html",
+ ),
+ distill_re_path(r"^data/search/?$", views.data_search, name="data-search"),
+ distill_re_path(
+ r"^BoSc22/?$",
+ views.boehmer_schaar,
+ name="boehmer-schaar",
+ distill_file="BoSc22.html",
+ ),
+ distill_re_path(
+ r"^tools/ivs/?$",
+ views.tools_IVS,
+ name="tools-IVS",
+ distill_file="tools/ivs.html",
+ ),
+ distill_re_path(
+ r"^tools/kdg/?$",
+ views.tools_KDG,
+ name="tools-KDG",
+ distill_file="tools/kdg.html",
+ ),
+ distill_re_path(
+ r"^tools/cris/?$",
+ views.tools_CRIS,
+ name="tools-CRIS",
+ distill_file="tools/cris.html",
+ ),
+ re_path(r"^login/?$", views.user_login, name="login"),
+ re_path(r"^logout/?$", views.user_logout, name="logout"),
]
diff --git a/preflibapp/views.py b/preflibapp/views.py
index a3cee2b..6bced8e 100644
--- a/preflibapp/views.py
+++ b/preflibapp/views.py
@@ -30,7 +30,7 @@ def get_paginator(request, iterable, page_size=20, window_size=3, max_pages=15):
paginator = Paginator(iterable, page_size)
# Try to find the page number, default being 1
try:
- page = int(request.GET.get('page'))
+ page = int(request.GET.get("page"))
except TypeError:
page = 1
@@ -45,7 +45,9 @@ def get_paginator(request, iterable, page_size=20, window_size=3, max_pages=15):
pages_before.append("...")
for p in range(max(1, page - window_size), page):
pages_before.append(p)
- pages_after = list(range(page + 1, min(page + window_size + 1, paginator.num_pages + 1)))
+ pages_after = list(
+ range(page + 1, min(page + window_size + 1, paginator.num_pages + 1))
+ )
if page + window_size < paginator.num_pages - 1:
pages_after.append("...")
if page + window_size < paginator.num_pages:
@@ -58,13 +60,14 @@ def get_paginator(request, iterable, page_size=20, window_size=3, max_pages=15):
# ============
-# Renderer
+# Renderer
# ============
+
def my_render(request, template, args=None):
if args is None:
args = {}
- args['loginNextUrl'] = request.get_full_path
+ args["loginNextUrl"] = request.get_full_path
return render(request, template, args)
@@ -74,66 +77,73 @@ def error_render(request, template, status):
def error_400_view(request, exception):
- return error_render(request, '400.html', 400)
+ return error_render(request, "400.html", 400)
def error_403_view(request, exception):
- return error_render(request, '403.html', 403)
+ return error_render(request, "403.html", 403)
def error_404_view(request, exception):
- return error_render(request, '404.html', 404)
+ return error_render(request, "404.html", 404)
def error_500_view(request):
- return error_render(request, '500.html', 500)
+ return error_render(request, "500.html", 500)
# =========
# Views
# =========
+
@cache_page(CACHE_TIME)
def main(request):
nb_dataset = DataSet.objects.count()
nb_datafile = DataFile.objects.count()
- total_size = DataFile.objects.aggregate(Sum('file_size'))['file_size__sum']
- nb_datatype = DataFile.objects.values('data_type').distinct().count()
+ total_size = DataFile.objects.aggregate(Sum("file_size"))["file_size__sum"]
+ nb_datatype = DataFile.objects.values("data_type").distinct().count()
- (paginator, papers, page, pages_before, pages_after) = get_paginator(request, Paper.objects.all(), page_size=15)
+ (paginator, papers, page, pages_before, pages_after) = get_paginator(
+ request, Paper.objects.all(), page_size=15
+ )
- return my_render(request, os.path.join('preflib', 'index.html'), locals())
+ return my_render(request, os.path.join("preflib", "index.html"), locals())
@cache_page(CACHE_TIME)
def data_format(request):
all_tags = DataTag.objects.all()
- metadata_per_categories = [(c[1], Metadata.objects.filter(is_active=True, category=c[0])) for c in
- METADATACATEGORIES]
- return my_render(request, os.path.join('preflib', 'data_format.html'), locals())
+ metadata_per_categories = [
+ (c[1], Metadata.objects.filter(is_active=True, category=c[0]))
+ for c in METADATACATEGORIES
+ ]
+ return my_render(request, os.path.join("preflib", "data_format.html"), locals())
@cache_page(CACHE_TIME)
def all_datasets(request):
tags = set()
- datasets = DataSet.objects.filter().order_by('name')
+ datasets = DataSet.objects.filter().order_by("name")
dataset_info = []
for ds in datasets:
max_files_displayed = 7
files = list(ds.files.filter(related_files__isnull=True))
- dataset_info.append({
- "ds": ds,
- "timestamp": (ds.publication_date - datetime.date(2000, 1, 1)).days,
- "files": files[:max_files_displayed],
- "num_files": len(files),
- "num_hidden_files": max(0, len(files) - max_files_displayed),
- "zip_file": ds.zip_file_path,
- "zip_file_size": ds.zip_file_size,
- "tags": ",".join(ds.tags.values_list('name', flat=True))
- })
+ dataset_info.append(
+ {
+ "ds": ds,
+ "timestamp": (ds.publication_date - datetime.date(2000, 1, 1)).days,
+ "files": files[:max_files_displayed],
+ "num_files": len(files),
+ "num_hidden_files": max(0, len(files) - max_files_displayed),
+ "zip_file": ds.zip_file_path,
+ "zip_file_size": ds.zip_file_size,
+ "tags": ",".join(ds.tags.values_list("name", flat=True)),
+ }
+ )
for tag in ds.tags.all():
tags.add(tag)
- return my_render(request, os.path.join('preflib', 'dataset_all.html'), locals())
+ return my_render(request, os.path.join("preflib", "dataset_all.html"), locals())
@cache_page(CACHE_TIME)
@@ -141,8 +151,8 @@ def dataset_view(request, dataset_num):
dataset = get_object_or_404(DataSet, series_number=dataset_num)
data_files = dataset.files.all()
num_files = data_files.count()
- total_size = data_files.aggregate(Sum('file_size'))['file_size__sum']
- all_types = data_files.order_by('data_type').values_list('data_type').distinct()
+ total_size = data_files.aggregate(Sum("file_size"))["file_size__sum"]
+ all_types = data_files.order_by("data_type").values_list("data_type").distinct()
files_info = []
extra_files = []
@@ -155,7 +165,9 @@ def dataset_view(request, dataset_num):
# Getting the metadata value for each category
meta_per_category = {}
for prop in file.dataproperty_set.all():
- category_long_name = find_choice_value(METADATACATEGORIES, prop.metadata.category)
+ category_long_name = find_choice_value(
+ METADATACATEGORIES, prop.metadata.category
+ )
if category_long_name in meta_per_category:
meta_per_category[category_long_name].append(prop)
else:
@@ -174,7 +186,7 @@ def dataset_view(request, dataset_num):
with open(finders.find(file.file_path), "r", encoding="utf-8") as f:
for line in f.readlines():
global_index += 1
- if line.startswith('#'):
+ if line.startswith("#"):
if meta_index is not None:
if meta_index <= 15:
meta_lines.append((global_index, line.strip()))
@@ -184,7 +196,9 @@ def dataset_view(request, dataset_num):
meta_index = None
else:
if pref_index <= 10:
- pref_lines.append((global_index, line.strip().replace(',', ', ')))
+ pref_lines.append(
+ (global_index, line.strip().replace(",", ", "))
+ )
pref_index += 1
else:
pref_lines.append(("...", "..."))
@@ -192,14 +206,14 @@ def dataset_view(request, dataset_num):
lines = meta_lines + pref_lines
file_dict["preview"] = lines
files_info.append(file_dict)
- return my_render(request, os.path.join('preflib', 'dataset.html'), locals())
+ return my_render(request, os.path.join("preflib", "dataset.html"), locals())
def data_search(request):
print(request.POST)
types = copy.deepcopy(DATATYPES)
- types.remove(('dat', 'extra data file'))
- types.remove(('csv', 'comma-separated values'))
+ types.remove(("dat", "extra data file"))
+ types.remove(("csv", "comma-separated values"))
modification_types = MODIFICATIONTYPES
metadatas = Metadata.objects.filter(is_active=True, is_displayed=True)
@@ -210,11 +224,18 @@ def data_search(request):
if m.search_widget == "range":
props = DataProperty.objects.filter(metadata=m)
if props.exists():
- props = props.annotate(float_value=Cast('value', models.FloatField()))
- max_value = ceil(props.aggregate(Max('float_value'))['float_value__max'])
- min_value = floor(props.aggregate(Min('float_value'))['float_value__min'])
- intermediate_value = floor((max_value - min_value) * 0.3) if max_value > 30 else floor(
- (max_value - min_value) * 0.5)
+ props = props.annotate(float_value=Cast("value", models.FloatField()))
+ max_value = ceil(
+ props.aggregate(Max("float_value"))["float_value__max"]
+ )
+ min_value = floor(
+ props.aggregate(Min("float_value"))["float_value__min"]
+ )
+ intermediate_value = (
+ floor((max_value - min_value) * 0.3)
+ if max_value > 30
+ else floor((max_value - min_value) * 0.5)
+ )
metadata_slider_values[m] = (min_value, intermediate_value, max_value)
# If the min and max are equal, filtering on that metadata is useless so we remove it
@@ -229,93 +250,117 @@ def data_search(request):
print(metadatas)
# This is to save the POST data when we change to a different page of the results
- if request.method != 'POST' and 'page' in request.GET:
- if 'search_datafiles_POST' in request.session:
- request.POST = request.session['search_datafiles_POST']
- request.method = 'POST'
+ if request.method != "POST" and "page" in request.GET:
+ if "search_datafiles_POST" in request.session:
+ request.POST = request.session["search_datafiles_POST"]
+ request.method = "POST"
all_files = DataFile.objects.filter(data_type__in=[t[0] for t in types])
- if request.method == 'POST':
- request.session['search_datafiles_POST'] = request.POST
+ if request.method == "POST":
+ request.session["search_datafiles_POST"] = request.POST
datatype_filter = [t[0] for t in types]
for t in types:
- if request.POST.get(t[0] + 'selector') == "no":
+ if request.POST.get(t[0] + "selector") == "no":
if t[0] in datatype_filter:
datatype_filter.remove(t[0])
- elif request.POST.get(t[0] + 'selector') == "yes":
+ elif request.POST.get(t[0] + "selector") == "yes":
datatype_filter = [x for x in datatype_filter if x == t[0]]
all_files = all_files.filter(data_type__in=datatype_filter)
modiftype_filer = [mt[0] for mt in modification_types]
for mt in modification_types:
- if request.POST.get(mt[0] + 'selector') == "no":
+ if request.POST.get(mt[0] + "selector") == "no":
if mt[0] in modiftype_filer:
modiftype_filer.remove(mt[0])
- elif request.POST.get(mt[0] + 'selector') == "yes":
+ elif request.POST.get(mt[0] + "selector") == "yes":
modiftype_filer = [x for x in modiftype_filer if x == mt[0]]
all_files = all_files.filter(modification_type__in=modiftype_filer)
for m in metadatas:
if m.search_widget == "ternary":
- if request.POST.get(m.short_name + 'selector') == "no":
+ if request.POST.get(m.short_name + "selector") == "no":
property_query = DataProperty.objects.filter(metadata=m, value=True)
- all_files = all_files.exclude(dataproperty__in=models.Subquery(property_query.values('pk')))
- elif request.POST.get(m.short_name + 'selector') == "yes":
+ all_files = all_files.exclude(
+ dataproperty__in=models.Subquery(property_query.values("pk"))
+ )
+ elif request.POST.get(m.short_name + "selector") == "yes":
property_query = DataProperty.objects.filter(metadata=m, value=True)
- all_files = all_files.filter(dataproperty__in=models.Subquery(property_query.values('pk')))
+ all_files = all_files.filter(
+ dataproperty__in=models.Subquery(property_query.values("pk"))
+ )
elif m.search_widget == "range":
print(m.short_name)
- property_query_min = DataProperty.objects.filter(metadata=m).annotate(
- float_value=Cast('value', models.FloatField())).filter(
- float_value__lt=float(request.POST.get(m.short_name + '_slider_value_min')))
- all_files = all_files.exclude(dataproperty__in=models.Subquery(property_query_min.values('pk')))
- property_query_max = DataProperty.objects.filter(metadata=m).annotate(
- float_value=Cast('value', models.FloatField())).filter(
- float_value__gt=float(request.POST.get(m.short_name + '_slider_value_max')))
-
- all_files = all_files.exclude(dataproperty__in=models.Subquery(property_query_max.values('pk')))
-
- all_files = all_files.order_by('file_name', 'data_type')
- (paginator, datafiles, page, pages_before, pages_after) = get_paginator(request, all_files, page_size=40)
- return my_render(request, os.path.join('preflib', 'datasearch.html'), locals())
+ property_query_min = (
+ DataProperty.objects.filter(metadata=m)
+ .annotate(float_value=Cast("value", models.FloatField()))
+ .filter(
+ float_value__lt=float(
+ request.POST.get(m.short_name + "_slider_value_min")
+ )
+ )
+ )
+ all_files = all_files.exclude(
+ dataproperty__in=models.Subquery(property_query_min.values("pk"))
+ )
+ property_query_max = (
+ DataProperty.objects.filter(metadata=m)
+ .annotate(float_value=Cast("value", models.FloatField()))
+ .filter(
+ float_value__gt=float(
+ request.POST.get(m.short_name + "_slider_value_max")
+ )
+ )
+ )
+
+ all_files = all_files.exclude(
+ dataproperty__in=models.Subquery(property_query_max.values("pk"))
+ )
+
+ all_files = all_files.order_by("file_name", "data_type")
+ (paginator, datafiles, page, pages_before, pages_after) = get_paginator(
+ request, all_files, page_size=40
+ )
+ return my_render(request, os.path.join("preflib", "datasearch.html"), locals())
@cache_page(CACHE_TIME)
def boehmer_schaar(request):
- return my_render(request, os.path.join('preflib', 'boehmer_schaar.html'))
+ return my_render(request, os.path.join("preflib", "boehmer_schaar.html"))
# Tools views
@cache_page(CACHE_TIME)
def tools(request):
- return my_render(request, os.path.join('preflib', 'tools.html'))
+ return my_render(request, os.path.join("preflib", "tools.html"))
# Tools views
@cache_page(CACHE_TIME)
def tools_IVS(request):
- return my_render(request, os.path.join('preflib', 'toolsivs.html'))
+ return my_render(request, os.path.join("preflib", "toolsivs.html"))
# Tools views
@cache_page(CACHE_TIME)
def tools_KDG(request):
- return my_render(request, os.path.join('preflib', 'toolskdg.html'))
+ return my_render(request, os.path.join("preflib", "toolskdg.html"))
# Tools views
@cache_page(CACHE_TIME)
def tools_CRIS(request):
- return my_render(request, os.path.join('preflib', 'toolscris.html'))
+ return my_render(request, os.path.join("preflib", "toolscris.html"))
# Paper views
@cache_page(CACHE_TIME)
def papers(request):
- (paginator, papers, page, pages_before, pages_after) = get_paginator(request, Paper.objects.all(), page_size=30)
- return my_render(request, os.path.join('preflib', 'papers.html'), locals())
+ (paginator, papers, page, pages_before, pages_after) = get_paginator(
+ request, Paper.objects.all(), page_size=30
+ )
+ return my_render(request, os.path.join("preflib", "papers.html"), locals())
# User stuff
@@ -323,7 +368,7 @@ def user_login(request):
print(request.POST)
error = False
# The variable that get the next page if there is one
- request_next = request.POST.get('next', request.GET.get('next', ''))
+ request_next = request.POST.get("next", request.GET.get("next", ""))
if request.method == "POST":
form = LoginForm(request.POST)
if form.is_valid():
@@ -341,7 +386,7 @@ def user_login(request):
error = True
else:
form = LoginForm()
- return my_render(request, os.path.join('preflib', 'userlogin.html'), locals())
+ return my_render(request, os.path.join("preflib", "userlogin.html"), locals())
def user_logout(request):
@@ -349,4 +394,4 @@ def user_logout(request):
raise Http404
if request.user.is_authenticated:
logout(request)
- return redirect('preflibapp:main')
+ return redirect("preflibapp:main")
diff --git a/quicksetup.py b/quicksetup.py
index 25c12ff..ce331f1 100644
--- a/quicksetup.py
+++ b/quicksetup.py
@@ -3,7 +3,8 @@
if __name__ == "__main__":
## Write the settings.py file that we do not git for security reasons
with open("preflib/local_settings.py", "w") as f:
- f.write("""
+ f.write(
+ """
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
@@ -36,7 +37,8 @@
# Path to the unix convert command for the image handling
CONVERT_PATH = 'convert'
- """)
+ """
+ )
f.close()
## Create the migration folder and run the initial migration to set up the