Skip to content

Commit

Permalink
Merge pull request #726 from OliverCullimore/selenium-test-video-names
Browse files Browse the repository at this point in the history
feat: #725 Add names to selenium test videos
  • Loading branch information
OliverCullimore authored May 19, 2024
2 parents c5bac92 + 377e3d8 commit 8cfea75
Show file tree
Hide file tree
Showing 46 changed files with 205 additions and 172 deletions.
2 changes: 1 addition & 1 deletion uk_bin_collection/tests/test_common_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -332,7 +332,7 @@ def test_contains_date_with_mixed_content():


def test_create_webdriver_local():
    """Smoke-test that create_webdriver() can start a local (non-remote) Chrome driver.

    Passes web_driver=None so a local driver is created, and supplies the new
    ``session_name`` argument to confirm the extended signature is accepted.
    The resulting driver name may be "chrome" or "chrome-headless-shell"
    depending on the installed Chrome build, so both are accepted.
    """
    # NOTE(review): the captured diff left both the old call (without
    # session_name) and the new one in sequence; only the updated call belongs.
    result = create_webdriver(
        None, headless=True, user_agent="FireFox", session_name="test-session"
    )
    assert result.name in ["chrome", "chrome-headless-shell"]


Expand Down
5 changes: 4 additions & 1 deletion uk_bin_collection/uk_bin_collection/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,14 +258,15 @@ def contains_date(string, fuzzy=False) -> bool:


def create_webdriver(
web_driver: str = None, headless: bool = True, user_agent: str = None
web_driver: str = None, headless: bool = True, user_agent: str = None, session_name: str = None
) -> webdriver.Chrome:
"""
Create and return a Chrome WebDriver configured for optional headless operation.
:param web_driver: URL to the Selenium server for remote web drivers. If None, a local driver is created.
:param headless: Whether to run the browser in headless mode.
:param user_agent: Optional custom user agent string.
:param session_name: Optional custom session name string.
:return: An instance of a Chrome WebDriver.
:raises WebDriverException: If the WebDriver cannot be created.
"""
Expand All @@ -279,6 +280,8 @@ def create_webdriver(
if user_agent:
options.add_argument(f"--user-agent={user_agent}")
options.add_experimental_option("excludeSwitches", ["enable-logging"])
if session_name and web_driver:
options.set_capability("se:name", session_name)

try:
if web_driver:
Expand Down
11 changes: 6 additions & 5 deletions uk_bin_collection/uk_bin_collection/councils/ArunCouncil.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,10 @@
import time

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from bs4 import BeautifulSoup
from selenium.webdriver.support.ui import Select, WebDriverWait

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass
Expand All @@ -28,7 +27,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
user_postcode = kwargs.get("postcode")
headless = kwargs.get("headless")
web_driver = kwargs.get("web_driver")
driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
page = "https://www1.arun.gov.uk/when-are-my-bins-collected/"
check_paon(user_paon)
check_postcode(user_postcode)
Expand Down Expand Up @@ -78,7 +77,9 @@ def parse_data(self, page: str, **kwargs) -> dict:
collection_type = (
row.find("th", class_="govuk-table__header").text.strip().split(" ")
)[0]
collection_date = row.find("td", class_="govuk-table__cell").text.strip()
collection_date = row.find(
"td", class_="govuk-table__cell"
).text.strip()

# Append the information to the data structure
data["bins"].append(
Expand Down
5 changes: 2 additions & 3 deletions uk_bin_collection/uk_bin_collection/councils/BarnetCouncil.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,7 @@
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select, WebDriverWait

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass
Expand Down Expand Up @@ -70,7 +69,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
check_paon(user_paon)
headless = kwargs.get("headless")
web_driver = kwargs.get("web_driver")
driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
page = "https://account.barnet.gov.uk/Forms/Home/Redirector/Index/?id=6a2ac067-3322-46e5-96e4-16c0c214454a&mod=OA&casetype=BAR&formname=BNTCOLDATE"
driver.get(page)

Expand Down
10 changes: 5 additions & 5 deletions uk_bin_collection/uk_bin_collection/councils/BexleyCouncil.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,16 @@
from bs4 import BeautifulSoup
import time
from datetime import datetime

from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys

import time
from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass


# import the wonderful Beautiful Soup and the URL grabber


Expand All @@ -35,7 +35,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
headless = kwargs.get("headless")

# Create Selenium webdriver
driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
driver.get(page)

# If you bang in the house number (or property name) and postcode in the box it should find your property
Expand Down
12 changes: 7 additions & 5 deletions uk_bin_collection/uk_bin_collection/councils/BlackburnCouncil.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
import json
import logging
import ssl
from collections import OrderedDict
from datetime import datetime
from bs4 import BeautifulSoup

import requests
import urllib3
from bs4 import BeautifulSoup

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass
import ssl
import urllib3
import logging


class CustomHttpAdapter(requests.adapters.HTTPAdapter):
Expand Down Expand Up @@ -48,7 +50,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
f"https://mybins.blackburn.gov.uk/api/mybins/getbincollectiondays?uprn={uprn}&month={current_month}"
f"&year={current_year}"
)
driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
driver.get(url)

soup = BeautifulSoup(driver.page_source, "html.parser")
Expand Down
9 changes: 5 additions & 4 deletions uk_bin_collection/uk_bin_collection/councils/BoltonCouncil.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
from bs4 import BeautifulSoup
import time
from datetime import datetime

from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys

import time
from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass

Expand Down Expand Up @@ -34,7 +35,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
# Get our initial session running
page = "https://carehomes.bolton.gov.uk/bins.aspx"

driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
driver.get(page)

# If you bang in the house number (or property name) and postcode in the box it should find your property
Expand Down
Original file line number Diff line number Diff line change
@@ -1,19 +1,19 @@
import re
import requests
from bs4 import BeautifulSoup

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass

# This script pulls (in one hit) the data from Bromley Council Bins Data
import datetime
import re
import time
from datetime import datetime

import requests
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import time

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass


# import the wonderful Beautiful Soup and the URL grabber
Expand All @@ -35,7 +35,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
postcode = kwargs.get("postcode")
web_driver = kwargs.get("web_driver")
headless = kwargs.get("headless")
driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
driver.get(kwargs.get("url"))

wait = WebDriverWait(driver, 60)
Expand Down
Original file line number Diff line number Diff line change
@@ -1,16 +1,16 @@
# This script pulls (in one hit) the data from Bromley Council Bins Data
import datetime
from bs4 import BeautifulSoup
import time
from datetime import datetime

from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import time

from dateutil.relativedelta import relativedelta
from bs4 import BeautifulSoup
from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass

Expand All @@ -35,7 +35,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
data = {"bins": []}

# Get our initial session running
driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
driver.get(kwargs.get("url"))

wait = WebDriverWait(driver, 30)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
check_postcode(user_postcode)

# Create Selenium webdriver
driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
driver.get(page)

# Populate postcode field
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import pandas as pd
import time

import pandas as pd
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
Expand Down Expand Up @@ -42,7 +43,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
headless = kwargs.get("headless")

# Create Selenium webdriver
driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
driver.get(page)

# Enter postcode in text box and wait
Expand Down
19 changes: 9 additions & 10 deletions uk_bin_collection/uk_bin_collection/councils/CalderdaleCouncil.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,18 @@
import requests
from bs4 import BeautifulSoup

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass

# This script pulls (in one hit) the data from Calderdale Council Bins Data
import datetime
from bs4 import BeautifulSoup
import time
from datetime import datetime

import requests
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import time

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass


# import the wonderful Beautiful Soup and the URL grabber
Expand All @@ -40,7 +39,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
data = {"bins": []}

# Get our initial session running
driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
driver.get(kwargs.get("url"))

wait = WebDriverWait(driver, 30)
Expand Down
Original file line number Diff line number Diff line change
@@ -1,19 +1,19 @@
import re
import requests
from bs4 import BeautifulSoup

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass

# This script pulls (in one hit) the data from Bromley Council Bins Data
import datetime
import re
import time
from datetime import datetime

import requests
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import time

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass


# import the wonderful Beautiful Soup and the URL grabber
Expand All @@ -35,7 +35,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
user_paon = kwargs.get("paon")
web_driver = kwargs.get("web_driver")
headless = kwargs.get("headless")
driver = create_webdriver(web_driver, headless)
driver = create_webdriver(web_driver, headless, None, __name__)
url = kwargs.get("url")

driver.execute_script(f"window.location.href='{url}'")
Expand Down
Loading

0 comments on commit 8cfea75

Please sign in to comment.