Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/master' into dartford
Browse files Browse the repository at this point in the history
  • Loading branch information
5ila5 committed Jan 6, 2025
2 parents 1301b40 + 8c93106 commit b6097af
Show file tree
Hide file tree
Showing 21 changed files with 730 additions and 78 deletions.
4 changes: 4 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2143,6 +2143,7 @@ If your service provider is not listed, feel free to open a [source request issu
- [Braintree District Council](/doc/source/braintree_gov_uk.md) / braintree.gov.uk
- [Breckland Council](/doc/source/breckland_gov_uk.md) / breckland.gov.uk/mybreckland
- [Brent Council](/doc/ics/brent_gov_uk.md) / brent.gov.uk
- [Bridgend County Borough Council](/doc/source/bridgend_gov_uk.md) / bridgend.gov.uk
- [Bristol City Council](/doc/source/bristol_gov_uk.md) / bristol.gov.uk
- [Broadland District Council](/doc/source/south_norfolk_and_broadland_gov_uk.md) / area.southnorfolkandbroadland.gov.uk
- [Bromsgrove City Council](/doc/source/bromsgrove_gov_uk.md) / bromsgrove.gov.uk
Expand Down Expand Up @@ -2173,6 +2174,7 @@ If your service provider is not listed, feel free to open a [source request issu
- [Coventry City Council](/doc/source/coventry_gov_uk.md) / coventry.gov.uk
- [Crawley Borough Council (myCrawley)](/doc/source/crawley_gov_uk.md) / crawley.gov.uk
- [Croydon Council](/doc/source/croydon_gov_uk.md) / croydon.gov.uk
- [Cumberland Council](/doc/source/cumberland_gov_uk.md) / cumberland.gov.uk
- [Dacorum Borough Council](/doc/source/dacorum_gov_uk.md) / dacorum.gov.uk
- [Darlington Borough Council](/doc/source/darlington_gov_uk.md) / darlington.gov.uk
- [Dartford Borough Council](/doc/source/dartford_gov_uk.md) / dartford.gov.uk
Expand Down Expand Up @@ -2293,6 +2295,7 @@ If your service provider is not listed, feel free to open a [source request issu
- [Rhondda Cynon Taf County Borough Council](/doc/source/rctcbc_gov_uk.md) / rctcbc.gov.uk
- [Richmondshire District Council](/doc/source/richmondshire_gov_uk.md) / richmondshire.gov.uk
- [Rochdale Borough Council](/doc/source/rochdale_gov_uk.md) / rochdale.gov.uk
- [Rother District Council](/doc/source/rother_gov_uk.md) / rother.gov.uk
- [Rotherham](/doc/source/apps_imactivate_com.md) / rotherham.gov.uk
- [Rotherham Metropolitan Borough Council](/doc/source/rotherham_gov_uk.md) / rotherham.gov.uk
- [Royal Borough Of Greenwich](/doc/source/royalgreenwich_gov_uk.md) / royalgreenwich.gov.uk
Expand Down Expand Up @@ -2390,6 +2393,7 @@ If your service provider is not listed, feel free to open a [source request issu
- [City of McKinney, TX](/doc/ics/recollect.md) / mckinneytexas.org
- [City of Oklahoma City (unofficial)](/doc/source/okc_gov.md) / okc.gov
- [City of Pittsburgh](/doc/source/pgh_st.md) / pgh.st
- [Davenport, Iowa, USA](/doc/ics/recollect.md) / davenportiowa.com
- [Hardin Sanitation, Idaho, USA](/doc/ics/recollect.md) / hardinsanitation.com
- [Louisville, Kentucky, USA](/doc/source/recyclecoach_com.md) / recyclecoach.com/cities/usa-ky-city-of-louisville
- [Minneapolis MN USA](/doc/source/apps_ci_minneapolis_mn_us.md) / minneapolismn.gov
Expand Down
26 changes: 26 additions & 0 deletions custom_components/waste_collection_schedule/sources.json
Original file line number Diff line number Diff line change
Expand Up @@ -13811,6 +13811,12 @@
"default_params": {},
"id": "ics_brent_gov_uk"
},
{
"title": "Bridgend County Borough Council",
"module": "bridgend_gov_uk",
"default_params": {},
"id": "bridgend_gov_uk"
},
{
"title": "Bristol City Council",
"module": "bristol_gov_uk",
Expand Down Expand Up @@ -13995,6 +14001,12 @@
"default_params": {},
"id": "croydon_gov_uk"
},
{
"title": "Cumberland Council",
"module": "cumberland_gov_uk",
"default_params": {},
"id": "cumberland_gov_uk"
},
{
"title": "Dacorum Borough Council",
"module": "dacorum_gov_uk",
Expand Down Expand Up @@ -14731,6 +14743,12 @@
"default_params": {},
"id": "rochdale_gov_uk"
},
{
"title": "Rother District Council",
"module": "rother_gov_uk",
"default_params": {},
"id": "rother_gov_uk"
},
{
"title": "Rotherham",
"module": "apps_imactivate_com",
Expand Down Expand Up @@ -15328,6 +15346,14 @@
"default_params": {},
"id": "pgh_st"
},
{
"title": "Davenport, Iowa, USA",
"module": "ics",
"default_params": {
"split_at": "\\, (?:and )?|(?: and )"
},
"id": "ics_recollect"
},
{
"title": "Hardin Sanitation, Idaho, USA",
"module": "ics",
Expand Down
75 changes: 73 additions & 2 deletions custom_components/waste_collection_schedule/translations/de.json

Large diffs are not rendered by default.

77 changes: 75 additions & 2 deletions custom_components/waste_collection_schedule/translations/en.json

Large diffs are not rendered by default.

75 changes: 73 additions & 2 deletions custom_components/waste_collection_schedule/translations/it.json

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
# There was an ICS source but the ICS file was not stored permanently and would be removed after a few days.
import requests
from bs4 import BeautifulSoup, Tag
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS

Expand All @@ -13,19 +12,21 @@
"strasse": "Drosselgasse",
},
"Milow Friedhofstr.": {"ort": "Milow", "strasse": "Friedhofstr."},
"Falkensee Ahornstr.": {"ort": "Falkensee", "strasse": "Ahornstr."},
"Falkensee complex street name": {
"ort": "Falkensee",
"strasse": "Karl-Marx-Str. (von Friedrich-Hahn-Str. bis Am Schlaggraben)",
},
}


ICON_MAP = {
"mülltonne": "mdi:trash-can",
"bio-tonne": "mdi:leaf",
"papier": "mdi:package-variant",
"gelbe": "mdi:recycle",
}


API_URL = "https://www.abfall-havelland.de//groups/public/modules/ajax_tourenplan.php"
BASE_URL = "https://www.abfall-havelland.de/"
API_URL = "https://www.abfall-havelland.de/ics.php"


class Source:
Expand All @@ -37,19 +38,9 @@ def __init__(self, ort: str, strasse: str):
def fetch(self) -> list[Collection]:
args = {"city": self._ort, "street": self._strasse}

# get json file
# ics content
r = requests.get(API_URL, params=args)
r.raise_for_status()
soup = BeautifulSoup(r.text, "html.parser")
ics_link_tag = soup.find("a", id="ical")
if not isinstance(ics_link_tag, Tag):
raise Exception("No ics link found")
ics_link = ics_link_tag.attrs["onclick"].split("'")[1]
if not isinstance(ics_link, str):
raise Exception("No ics link found")
r = requests.get(BASE_URL + ics_link)
r.raise_for_status()
r.encoding = "utf-8"
dates = self._ics.convert(r.text)
entries = []
for d in dates:
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
from datetime import datetime

import requests
from bs4 import BeautifulSoup
from waste_collection_schedule import Collection

# Source metadata displayed to users of the integration.
TITLE = "Bridgend County Borough Council"
DESCRIPTION = "Source for bridgend.gov.uk"
URL = "https://www.bridgend.gov.uk/"
# Known-good UPRNs (both str and int forms are accepted by Source.__init__).
# Presumably consumed by the project's test harness — follows the repo convention.
TEST_CASES: dict = {
    "test_001": {"uprn": "100100479873"},
    "test_002": {"uprn": 10032996088},
    "test_003": {"uprn": "10090813443"},
}
# Maps the parsed service name to an icon; unknown names fall back to None.
ICON_MAP: dict = {
    "Refuse": "mdi:trash-can",
    "Recycling": "mdi:recycle",
}
# Browser-like user agent; NOTE(review): presumably needed so the portal
# does not reject scripted requests — confirm against the portal's behavior.
HEADERS: dict = {"user-agent": "Mozilla/5.0"}

# User-facing help text for configuring this source.
HOW_TO_GET_ARGUMENTS_DESCRIPTION: dict = {
    "en": "an easy way to discover your Unique Property Reference Number (UPRN) is by going to https://www.findmyaddress.co.uk/ and entering in your address details.",
}
PARAM_TRANSLATIONS: dict = {
    "en": {
        "uprn": "Unique Property Reference Number (UPRN)",
    }
}
PARAM_DESCRIPTIONS: dict = {
    "en": {
        "uprn": "Unique Property Reference Number (UPRN)",
    }
}


class Source:
    """Scrape upcoming waste collections for a property from the Bridgend portal."""

    def __init__(self, uprn: str | int):
        # UPRN may arrive as an int from YAML config; normalise to str for the URL.
        self._uprn = str(uprn)

    def fetch(self) -> list[Collection]:
        """Fetch and parse the collection schedule for this property.

        Returns:
            One Collection entry per service row on the property page.

        Raises:
            requests.HTTPError: if the portal responds with an error status.
        """
        s = requests.Session()
        r = s.get(
            f"https://bridgendportal.azurewebsites.net/property/{self._uprn}",
            headers=HEADERS,
        )
        # Fail loudly on HTTP errors instead of silently parsing an error page.
        r.raise_for_status()
        soup: BeautifulSoup = BeautifulSoup(r.content, "html.parser")

        # Table cells alternate: service name cell, then its next-service date cell.
        tds: list = soup.find_all("td", {"class": ["service-name", "next-service"]})
        waste_types: list = tds[0::2]
        waste_dates: list = tds[1::2]

        entries: list = []
        # zip (rather than index arithmetic) also guards against an odd cell
        # count raising IndexError.
        for type_td, date_td in zip(waste_types, waste_dates):
            waste_type = type_td.text.split(" ")[0].replace("\n", "").strip()
            waste_date = (
                date_td.text.split(" ")[1]
                .replace("\t", "")
                .replace("Service\n", "")
                .strip()
            )
            entries.append(
                Collection(
                    date=datetime.strptime(waste_date, "%d/%m/%Y").date(),
                    t=waste_type,
                    icon=ICON_MAP.get(waste_type),
                )
            )

        return entries
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import datetime
import json
import re

import requests
Expand All @@ -15,7 +14,7 @@
"Am Schwarzen Berge": {"street": "am Schwarzen Berge "},
}

API_URL = "https://www.cederbaum.de/blaue-tonne/abfuhrkalender"
API_URL = "https://www.cederbaum.de/blaue-tonne/"
ICON_MAP = {
"PAPER": "mdi:newspaper",
}
Expand Down Expand Up @@ -54,7 +53,7 @@ def get_street_id(self):
value = option.get("value")
text = option.get_text()
if text.lower().strip() == self._street.lower().strip():
self.street_id = value
self.street_id = int(value)
break

def get_collection_data(self):
Expand All @@ -63,15 +62,23 @@ def get_collection_data(self):

script_tags = self.page_source.find_all("script")
script_with_text = [tag for tag in script_tags if tag.string]
pattern = re.compile(r"var rate = (\{.*?\});")
pattern = re.compile(r"var rate = \[(.*?)\];")

# the dates are stored in a hardcoded js array
raw_date_text = None
for script_tag in script_with_text:
match = pattern.search(script_tag.string)
if match:
var_content = match.group(1)
self.collection_data = json.loads(var_content)
raw_date_text = match.group(1)
break

if not raw_date_text:
raise ValueError("Raw date text not found")

# one list of dates per location
raw_dates = [text.strip('"') for text in raw_date_text.split('","')]
self.collection_data = [dates.split(",") for dates in raw_dates]

def fetch(self):
self.fetch_page_source()
self.get_street_id()
Expand All @@ -81,11 +88,9 @@ def fetch(self):
raise ValueError("No collection data found")

entries = []
waste_dates = self.collection_data[self.street_id]["Termine"]
waste_dates = self.collection_data[self.street_id] # type: ignore
for waste_date in waste_dates:
date = datetime.datetime.strptime(
waste_dates[waste_date]["Termin"], "%d.%m.%Y"
)
date = datetime.datetime.strptime(waste_date, "%d.%m.%Y")

entries.append(
Collection(
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
from datetime import datetime

import requests
from bs4 import BeautifulSoup
from waste_collection_schedule import Collection # type: ignore[attr-defined]

# Source metadata displayed to users of the integration.
TITLE = "Cumberland Council"
DESCRIPTION = "Source for cumberland.gov.uk services for Cumberland Council, UK."
URL = "https://cumberland.gov.uk"
# Known-good postcode/UPRN pairs (UPRN accepted as str or int).
# Presumably consumed by the project's test harness — follows the repo convention.
TEST_CASES = {
    "Test_001": {"postcode": "CA28 7QS", "uprn": "100110319463"},
    "Test_002": {"postcode": "CA28 8LG", "uprn": 100110320734},
    "Test_003": {"postcode": "CA28 6SW", "uprn": "10000895390"},
}
# Maps the parsed service name (with " Collection Service" stripped) to an icon.
ICON_MAP = {
    "Recycling": "mdi:recycle",
    "Domestic Waste": "mdi:trash-can",
}
# Browser-like user agent; NOTE(review): presumably needed so the portal
# does not reject scripted requests — confirm against the portal's behavior.
HEADERS = {"user-agent": "Mozilla/5.0"}
# TOKEN: form page that yields the anti-forgery token; SCHEDULE: form submit endpoint.
API_URLS = {
    "TOKEN": "https://waste.cumberland.gov.uk/renderform?t=25&k=E43CEB1FB59F859833EF2D52B16F3F4EBE1CAB6A",
    "SCHEDULE": "https://waste.cumberland.gov.uk/renderform/Form",
}

# User-facing help text for configuring this source.
HOW_TO_GET_ARGUMENTS_DESCRIPTION = {
    "en": "An easy way to discover your Unique Property Reference Number (UPRN) is by going to https://www.findmyaddress.co.uk/ and entering in your address details.",
}
PARAM_TRANSLATIONS = {
    "en": {
        "postcode": "Postcode of your property",
        "uprn": "Unique Property Reference Number (UPRN)",
    }
}
PARAM_DESCRIPTIONS = {
    "en": {
        "postcode": "Postcode of your property",
        "uprn": "Unique Property Reference Number (UPRN)",
    }
}


class Source:
    """Scrape upcoming waste collections from the Cumberland Council waste portal."""

    def __init__(self, postcode: str, uprn: str | int):
        # Normalise to upper case to match the portal's displayed postcode format.
        self._postcode: str = str(postcode).upper()
        # UPRN may arrive as an int from YAML config; the form expects a string.
        self._uprn: str = str(uprn)

    def fetch(self) -> list[Collection]:
        """Fetch and parse the collection schedule for this property.

        Returns:
            One Collection entry per upcoming service date.

        Raises:
            requests.HTTPError: if either portal request fails.
            Exception: if the anti-forgery token cannot be found in the form page.
        """
        s = requests.Session()

        # Step 1: load the form page to obtain the anti-forgery token that
        # must accompany the schedule request.
        r = s.get(
            API_URLS["TOKEN"],
            headers=HEADERS,
        )
        r.raise_for_status()

        soup: BeautifulSoup = BeautifulSoup(r.content, "html.parser")
        token_input = soup.find("input", {"type": "hidden"})
        if token_input is None:
            # Explicit error beats the AttributeError we would otherwise get.
            raise Exception("Request verification token not found in form page")
        token: str = token_input.get("value")

        # Step 2: submit the form to get the schedule. Field names, GUID and
        # template IDs are fixed values taken from the portal's form markup.
        payload: dict = {
            "__RequestVerificationToken": token,
            "FF265": f"U{self._uprn}",
            "FF265-text": self._postcode,
            "FF265lbltxt": "Please select your address",
            "FormGuid": "371be01e-1204-428e-bccd-eeacaf7cbfac",
            "ObjectTemplateID": "25",
            "Trigger": "submit",
            "CurrentSectionID": "33",
            "TriggerCtl": "",
        }
        r = s.post(
            API_URLS["SCHEDULE"],
            headers=HEADERS,
            data=payload,
        )
        r.raise_for_status()

        soup = BeautifulSoup(r.content, "html.parser")
        # The schedule is rendered as alternating <div class="col"> cells:
        # date, service name, date, service name, ... The first two cells are
        # header text, so skip them; drop empty cells.
        schedule: list = soup.find_all("div", {"class": "col"})
        schedule = [item.text for item in schedule[2:] if item.text != ""]
        waste_dates: list = schedule[0::2]
        waste_types: list = schedule[1::2]

        entries: list = []
        for waste_date, waste_type in zip(waste_dates, waste_types):
            # Strip the boilerplate suffix once and reuse for both label and icon.
            service = waste_type.replace(" Collection Service", "")
            entries.append(
                Collection(
                    date=datetime.strptime(waste_date, "%A %d %B %Y").date(),
                    t=service,
                    icon=ICON_MAP.get(service),
                )
            )

        return entries
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import requests
import datetime
from bs4 import BeautifulSoup
from dateutil import parser
from waste_collection_schedule import Collection # type: ignore[attr-defined]
Expand Down Expand Up @@ -40,6 +41,8 @@ def fetch(self):
for i in range(len(dates)):
bint = " ".join(bins[i].text.split()[2:4])
date = parser.parse(dates[i].text).date()
if date.month == 1 and datetime.date.today().month == 12 and date.year == datetime.date.today().year:
date = date.replace(year=date.year+1)
entries.append(
Collection(
date=date,
Expand Down
Loading

0 comments on commit b6097af

Please sign in to comment.