uulm-coronang-autojoin/main.py

import argparse
import re
import sys
import time

import requests
from bs4 import BeautifulSoup

# Parse the command-line arguments: the event id (cvid) and session cookie
parser = argparse.ArgumentParser(description="CoronaNG Autojoin Script")
parser.add_argument("--cvid", type=int, required=True, help="The cvid of the event to join (from the userDetails URL)")
parser.add_argument("--jsessionid", type=str, required=True, help="The JSESSIONID cookie value of a logged-in session")
args = parser.parse_args()
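
# Example invocation (both values below are placeholders, not real credentials):
#   python main.py --cvid 12345 --jsessionid 0A1B2C3D4E5F6A7B8C9D0E1F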
# URL to scrape
url = f"https://campusonline.uni-ulm.de/CoronaNG/user/userDetails.html?cvid={args.cvid}"
# Cookie to include in the request
cookie = f"JSESSIONID={args.jsessionid}"
# Headers for the GET request
headers = {
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:124.0) Gecko/20100101 Firefox/124.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
    "Accept-Language": "en-GB,en;q=0.5",
    # "br" is deliberately not advertised: requests can only decode
    # Brotli-compressed responses if the optional brotli package is installed.
    "Accept-Encoding": "gzip, deflate",
    "Referer": url,
    "Connection": "keep-alive",
    "Cookie": cookie,
    "Upgrade-Insecure-Requests": "1",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "same-origin",
    "Sec-Fetch-User": "?1",
    "Pragma": "no-cache",
    "Cache-Control": "no-cache",
}
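
# Note: JSESSIONID is the standard Java servlet session cookie, so it has to be
# copied from a logged-in CoronaNG browser session and replaced once it expires.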
def make_post_request(url, headers, payload, max_retries=3):
    """Send a POST request, retrying up to max_retries times before giving up."""
    retries = 0
    while retries < max_retries:
        try:
            response = requests.post(url, headers=headers, data=payload)
            response.raise_for_status()  # Raise an exception for 4xx or 5xx status codes
            return response
        except requests.exceptions.RequestException as e:
            print(f"Error occurred during POST request: {e}")
            retries += 1
            if retries < max_retries:
                print(f"Retrying in 5 seconds... (Attempt {retries + 1}/{max_retries})")
                time.sleep(5)
            else:
                print("Max retries reached. Exiting.")
                sys.exit(1)

while True:
    try:
        print("Scraping...")
        # Send GET request to the URL with the specified headers
        response = requests.get(url, headers=headers)
        response.raise_for_status()  # Raise an exception for 4xx or 5xx status codes
        # Parse the HTML content using BeautifulSoup
        soup = BeautifulSoup(response.content, "html.parser")
        # Find all <tr> elements with class "dbu"
        tr_elements = soup.find_all("tr", class_="dbu")
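
        # The participant row is assumed to look roughly like this (markup
        # reconstructed from the parsing logic below, not verified against the site):
        #   <tr class="dbu"><th>Max. Teilnehmer</th><td>25 (aktuell 24)</td></tr>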
        for tr_element in tr_elements:
            th_element = tr_element.find("th")
            if th_element and "Max. Teilnehmer" in th_element.text:
                # Extract the max and current participant numbers
                td_element = tr_element.find("td")
                participant_info = td_element.text.strip()
                # Regex to find the numbers in a string like "10 (aktuell 10)"
                regex = r"(\d+) \(aktuell (\d+)\)"
                match = re.search(regex, participant_info)
                if match:
                    max_participants = int(match.group(1))
                    current_participants = int(match.group(2))
                    print("Max participants:", max_participants, "; Current participants:", current_participants)
                else:
                    print("Failed to parse participant info:", participant_info)
                    continue
                # Check if there is a free spot
                if current_participants < max_participants:
                    # Send POST request to participate
                    post_url = "https://campusonline.uni-ulm.de/CoronaNG/user/userDetails.html"
                    payload = f"id={args.cvid}&command=participate"
                    post_headers = headers.copy()
                    post_headers["Content-Type"] = "application/x-www-form-urlencoded"
                    post_headers["Content-Length"] = str(len(payload))
                    post_headers["Origin"] = "https://campusonline.uni-ulm.de"
                    post_response = make_post_request(post_url, post_headers, payload)
                    if post_response.status_code == 200:
                        print("Successfully participated!")
                        sys.exit(0)
                    else:
                        print("Failed to participate. Status code:", post_response.status_code)
                        sys.exit(1)
                else:
                    print("No free spots available.")
                    break
        else:
            # for/else: runs only when the loop finished without a break,
            # i.e. no "Max. Teilnehmer" row was found on the page
            print("Participant information not found.")
    except requests.exceptions.RequestException as e:
        print(f"Error occurred during GET request: {e}")
    print(f'Current Time: {time.strftime("%H:%M:%S")}. Sleeping for 30 seconds...')
    time.sleep(30)