tools/lqdn/scrap_votes.py

47 lines
1.6 KiB
Python

# encoding: utf-8
from datetime import datetime
from string import ascii_uppercase
from time import sleep
import sys
from bs4 import BeautifulSoup
import csv
import requests
from slugify import slugify
# Scrape each representative's page on the La Quadrature du Net wiki and
# dump their recorded votes to lqdn_votes.csv (one row per vote).
with open("lqdn_representatives.txt") as file_handler:
    deputes = file_handler.read().splitlines()

with open("lqdn_votes.csv", "w", encoding="utf-8", newline="") as csvfile:
    writer = csv.writer(csvfile, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
    writer.writerow(["name", "slug", "matter", "subject", "date", "extract", "source_url"])
    for depute in deputes:
        # Do not DDOS lqdn wiki ;o)
        sleep(.2)
        content = requests.get(f"https://wiki.laquadrature.net/{depute}").text
        soup = BeautifulSoup(content, features="lxml")

        # Headline looks like "...: Name, ..." — fall back to the raw page
        # identifier when the page has no mw-headline span.
        deputy = soup.find("span", attrs={"class": "mw-headline"})
        if deputy is not None:
            stance_author = deputy.text.split(",")[0].split(":")[1].strip()
        else:
            stance_author = depute
        print(stance_author)

        # Guard the section lookup itself: calling .parent on a missing
        # "Votes" heading would raise AttributeError before any None check.
        votes_heading = soup.find("span", attrs={"id": "Votes"})
        if votes_heading is not None:
            votes = votes_heading.parent.find_next_sibling("ul")
        else:
            votes = None
        if votes is not None:
            for vote in votes.find_all("li"):
                # NOTE(review): the original referenced stance_matter /
                # stance_subject / stance_date / stance_quote / stance_link
                # without ever assigning them (NameError at runtime).
                # Extract what the <li> markup reliably provides; parsing
                # the structured fields needs knowledge of the wiki's list
                # format — TODO confirm markup and fill these in.
                anchor = vote.find("a")
                stance_link = anchor.get("href", "") if anchor is not None else ""
                stance_quote = vote.get_text(" ", strip=True)
                stance_matter = ""
                stance_subject = ""
                stance_date = ""
                writer.writerow([
                    stance_author,
                    slugify(stance_author),
                    stance_matter,
                    stance_subject,
                    stance_date,
                    stance_quote,
                    stance_link,
                ])
                # Progress tick per vote row written.
                print(".", end="")
                sys.stdout.flush()
    print()