Commit

Extend Ruff to scripts dir
ajparsons committed Sep 25, 2024
1 parent bcfb35a commit ace1844
Showing 5 changed files with 206 additions and 168 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -27,7 +27,7 @@ build-backend = "poetry.core.masonry.api"


[tool.ruff]
extend-exclude = ["migrations", "commonlib", "scripts"]
extend-exclude = ["migrations", "commonlib", "scripts/historic"]

[tool.ruff.lint]
select = [
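The net effect of this one-line change is that Ruff now lints everything under scripts/ while still skipping the historic subfolder. A quick way to confirm the new scope from the repository root, as a sketch rather than part of the commit, assuming Ruff is installed and on PATH:

# Sketch: run Ruff over scripts/ and rely on pyproject.toml's extend-exclude
# to keep scripts/historic out of the results. Assumes the working directory
# is the repository root and that `ruff` is available on PATH.
import subprocess

result = subprocess.run(
    ["ruff", "check", "scripts/"],
    capture_output=True,
    text=True,
)
print(result.stdout)
print("exit code:", result.returncode)  # 0 means no violations were found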
7 changes: 2 additions & 5 deletions scripts/division_io.py
@@ -7,8 +7,6 @@
"""

import re
import sys
from enum import Enum
from pathlib import Path
from typing import cast
@@ -17,9 +15,9 @@
import MySQLdb
import pandas as pd
import rich_click as click
from pylib.mysociety import config
from rich import print
from rich.prompt import Prompt
from pylib.mysociety import config

repository_path = Path(__file__).parent.parent

@@ -40,7 +38,6 @@ class TitlePriority(str, Enum):

@classmethod
def get_priority(cls, priority: str) -> int:

lookup = {
cls.ORIGINAL_HEADER: 1,
cls.PARLIAMENT_DESCRIBED: 5,
@@ -104,7 +101,7 @@ def df_to_db(df: pd.DataFrame, *, new_priority: TitlePriority, verbose: bool = F

# get all divisions with a title_priority below or equal to current priority
existing_df = pd.read_sql(
f"SELECT division_id, title_priority FROM divisions",
"SELECT division_id, title_priority FROM divisions",
db_connection,
)
existing_df["int_title_priority"] = existing_df["title_priority"].apply(
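For context on the query change above: the f-string had no placeholders, which is what Ruff's F541 rule flags, so it becomes a plain string. The surrounding code appears to rank divisions with a str-based Enum, TitlePriority, whose get_priority classmethod maps each title source to an integer so rows read back from the database can be compared. A minimal self-contained sketch of that pattern, with the member string values and the fallback rank invented for illustration (only the ranks 1 and 5 appear in the diff):

# Sketch of the str-Enum priority pattern used in division_io.py.
# The member values and the default of 0 are assumptions; only the
# ORIGINAL_HEADER -> 1 and PARLIAMENT_DESCRIBED -> 5 ranks come from the diff.
from enum import Enum


class TitlePriority(str, Enum):
    ORIGINAL_HEADER = "original_header"
    PARLIAMENT_DESCRIBED = "parliament_described"

    @classmethod
    def get_priority(cls, priority: str) -> int:
        lookup = {
            cls.ORIGINAL_HEADER: 1,
            cls.PARLIAMENT_DESCRIBED: 5,
        }
        return lookup.get(priority, 0)


# Because the members subclass str, plain strings stored in the database
# hash and compare equal to the enum members, so the lookup works directly:
print(TitlePriority.get_priority("parliament_described"))  # -> 5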
124 changes: 65 additions & 59 deletions scripts/download_parliament_portraits.py
@@ -1,4 +1,4 @@
'''
"""
Python 3
Downloads thumbnails
@@ -9,80 +9,86 @@
Pillow
everypolitician-popolo
'''
"""

import os
import csv
from os.path import exists
from tempfile import gettempdir
from PIL import Image
from urllib.request import urlretrieve
from os.path import exists

from PIL import Image
from popolo_data.importer import Popolo

small_image_folder = r"..\www\docs\images\mps"
large_image_folder = r"..\www\docs\images\mpsL"


def get_id_lookup():
"""
create id lookup from popolo file
convert datadotparl_id to parlparse
"""
people_url = "https://github.com/mysociety/parlparse/raw/master/members/people.json"
pop = Popolo.from_url(people_url)
count = 0
lookup = {}
print ("Creating id lookup")
for p in pop.persons:
id = p.id
datadotparl = p.identifier_value("datadotparl_id")
if datadotparl:
lookup[datadotparl] = id[-5:]
count += 1
print (count, len(pop.persons))
return lookup

image_format = "https://members-api.parliament.uk/api/Members/{0}/Portrait?CropType=ThreeFour"
"""
create id lookup from popolo file
convert datadotparl_id to parlparse
"""
people_url = "https://github.com/mysociety/parlparse/raw/master/members/people.json"
pop = Popolo.from_url(people_url)
count = 0
lookup = {}
print("Creating id lookup")
for p in pop.persons:
id = p.id
datadotparl = p.identifier_value("datadotparl_id")
if datadotparl:
lookup[datadotparl] = id[-5:]
count += 1
print(count, len(pop.persons))
return lookup


image_format = (
"https://members-api.parliament.uk/api/Members/{0}/Portrait?CropType=ThreeFour"
)


def get_image_url(id):
return image_format.format(id)
return image_format.format(id)


def download_and_resize(mp_id, parlparse):
filename = "{0}.jpg".format(parlparse)
alt_filename = "{0}.jpeg".format(parlparse)
small_path = os.path.join(small_image_folder, filename)
small_path_alt = os.path.join(small_image_folder, alt_filename)
large_path = os.path.join(large_image_folder, filename)
temp_path = os.path.join(gettempdir(),"{0}.jpg".format(mp_id))
image_url = get_image_url(mp_id)
try:
urlretrieve(image_url, temp_path)
except Exception:
return None
print ("downloaded: {0}".format(image_url))
image = Image.open(temp_path)
if exists(large_path) is False:
image.thumbnail((120, 160))
image.save(large_path, quality=95)
if not exists(small_path) and not exists(small_path_alt):
image.thumbnail((60, 80))
image.save(small_path, quality=95)
image.close()
os.remove(temp_path)
filename = "{0}.jpg".format(parlparse)
alt_filename = "{0}.jpeg".format(parlparse)
small_path = os.path.join(small_image_folder, filename)
small_path_alt = os.path.join(small_image_folder, alt_filename)
large_path = os.path.join(large_image_folder, filename)
temp_path = os.path.join(gettempdir(), "{0}.jpg".format(mp_id))
image_url = get_image_url(mp_id)
try:
urlretrieve(image_url, temp_path)
except Exception:
return None
print("downloaded: {0}".format(image_url))
image = Image.open(temp_path)
if exists(large_path) is False:
image.thumbnail((120, 160))
image.save(large_path, quality=95)
if not exists(small_path) and not exists(small_path_alt):
image.thumbnail((60, 80))
image.save(small_path, quality=95)
image.close()
os.remove(temp_path)


def get_images():
"""
fetch image if available
"""
lookup = get_id_lookup()
for datadotparl, parlparse in lookup.items():

filename = "{0}.jpg".format(parlparse)
small_path = os.path.join(small_image_folder, filename)
large_path = os.path.join(large_image_folder, filename)
if exists(large_path) is False or exists(small_path) is False:
download_and_resize(datadotparl, parlparse)
"""
fetch image if available
"""
lookup = get_id_lookup()

for datadotparl, parlparse in lookup.items():
filename = "{0}.jpg".format(parlparse)
small_path = os.path.join(small_image_folder, filename)
large_path = os.path.join(large_image_folder, filename)
if exists(large_path) is False or exists(small_path) is False:
download_and_resize(datadotparl, parlparse)


if __name__ == "__main__":
get_images()
get_images()
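A small behavioural detail in download_and_resize that the reformatting leaves untouched: Pillow's Image.thumbnail() shrinks the image in place and preserves aspect ratio, so the second call operates on the already-reduced 120x160 copy rather than the original download. A standalone sketch of that in-place behaviour, using a generated image in place of a real portrait:

# Sketch: Image.thumbnail() modifies the image object in place and never
# enlarges it, which is why the script can call it twice to produce the
# large (120x160) and then the small (60x80) version from one download.
from PIL import Image

image = Image.new("RGB", (480, 640))  # stand-in for a downloaded 3:4 portrait
image.thumbnail((120, 160))
print(image.size)  # (120, 160)
image.thumbnail((60, 80))
print(image.size)  # (60, 80)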