"""CoronaNG autojoin script.

Polls a CoronaNG course-detail page (identified by ``--cvid``) using an
authenticated session cookie (``--jsessionid``), and provides a retrying
POST helper presumably used to join the course once a slot opens.

NOTE(review): the original file was whitespace-mangled and cut off inside
the main scraping loop (at a comment reading "# Find the ..."). The loop
below reproduces only the visible steps and is completed minimally so the
module is syntactically valid — confirm the element-lookup / join logic
against the full original file.
"""

import argparse
import re  # kept: not used in the visible portion, presumably used in the truncated part
import sys
import time

import requests
from bs4 import BeautifulSoup

# Network timeout for every HTTP request, in seconds.  The original code
# passed no timeout, so a stalled connection would hang the script forever.
REQUEST_TIMEOUT_S = 30


def _parse_args():
    """Parse and return the required --cvid and --jsessionid arguments."""
    parser = argparse.ArgumentParser(description="CoronaNG Autojoin Script")
    parser.add_argument("--cvid", type=int, required=True, help="The cvid value")
    parser.add_argument(
        "--jsessionid",
        type=str,
        required=True,
        help="The JSESSIONID cookie value",
    )
    return parser.parse_args()


def _build_headers(url, cookie):
    """Return browser-mimicking headers for requests to *url*.

    *url* is used as the Referer; *cookie* is the full ``JSESSIONID=...``
    cookie string.  The header set imitates Firefox so the server treats
    the script like a normal browser session.
    """
    return {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:124.0) Gecko/20100101 Firefox/124.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "en-GB,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "Referer": url,
        "Connection": "keep-alive",
        "Cookie": cookie,
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache",
    }


def make_post_request(url, headers, payload, max_retries=3):
    """POST *payload* to *url*, retrying up to *max_retries* times.

    Returns the successful ``requests.Response``.  On repeated failure the
    process exits with status 1 (via ``sys.exit`` rather than the
    site-module ``exit`` builtin, which may be absent under ``python -S``).

    FIX(review): the original print string was split by a raw newline in
    the mangled source ("Max retries reached. \\n Exiting.") — restored
    here as a single literal.
    """
    retries = 0
    while retries < max_retries:
        try:
            response = requests.post(
                url, headers=headers, data=payload, timeout=REQUEST_TIMEOUT_S
            )
            response.raise_for_status()  # Raise an exception for 4xx or 5xx status codes
            return response
        except requests.exceptions.RequestException as e:
            print(f"Error occurred during POST request: {str(e)}")
            retries += 1
            if retries < max_retries:
                print(f"Retrying in 5 seconds... (Attempt {retries+1}/{max_retries})")
                time.sleep(5)
            else:
                print("Max retries reached. Exiting.")
                sys.exit(1)


def main():
    """Run the scraping loop until interrupted."""
    args = _parse_args()
    # URL to scrape and the session cookie to authenticate with.
    url = f"https://campusonline.uni-ulm.de/CoronaNG/user/userDetails.html?cvid={args.cvid}"
    cookie = f"JSESSIONID={args.jsessionid}"
    headers = _build_headers(url, cookie)

    while True:
        try:
            print("Scraping...")
            # Send GET request to the URL with the specified headers
            response = requests.get(url, headers=headers, timeout=REQUEST_TIMEOUT_S)
            response.raise_for_status()  # Raise an exception for 4xx or 5xx status codes
            # Parse the HTML content using BeautifulSoup
            soup = BeautifulSoup(response.content, "html.parser")
            # NOTE(review): the original source was truncated here
            # ("# Find the ..."); the element lookup and join-POST logic
            # from the full file belongs at this point, using `soup`.
        except requests.exceptions.RequestException as e:
            # Hedged reconstruction: the original try's except clause was
            # lost in the truncation — keep polling on transient errors.
            print(f"Error occurred during GET request: {str(e)}")
        # Hedged reconstruction: poll interval unknown from the truncated
        # original — 5 s matches the retry delay used above; adjust if the
        # full file specifies a different cadence.
        time.sleep(5)


if __name__ == "__main__":
    main()