import requests
from bs4 import BeautifulSoup
import argparse

# Set up command-line argument parsing
parser = argparse.ArgumentParser(description="Scrape the '#registrarData' element from a domain's whois.com page.")
parser.add_argument('domain', type=str, help='The domain name to look up WHOIS data for (e.g. example.com)')
args = parser.parse_args()
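
# Example usage (assuming this script is saved as whois_scrape.py):
#   python whois_scrape.py example.com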

# URL of the whois.com lookup page for the requested domain
whois_url = f"https://www.whois.com/whois/{args.domain}"

# Add headers to simulate a browser request
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}

# Fetch the webpage (with a timeout so a stalled connection doesn't hang the script)
response = requests.get(whois_url, headers=headers, timeout=10)

# Print response status to debug
# print(f"HTTP Response: {response.status_code}")

# Check if the request was successful
if response.status_code == 200:
    # Parse the HTML content of the page
    soup = BeautifulSoup(response.content, 'html.parser')

    # Find the element with ID 'registrarData'
    registrar_data_element = soup.find(id='registrarData')

    if registrar_data_element:
        # The element's text lists one WHOIS field per line; search for the
        # "Creation Date:" line instead of relying on a fixed line index.
        registrar_text = registrar_data_element.get_text("\n", strip=True)
        creation_lines = [line for line in registrar_text.splitlines() if line.startswith("Creation Date:")]
        if creation_lines:
            print(creation_lines[0].replace("Creation Date:", "").strip())
        else:
            print("No 'Creation Date:' line found in the registrar data.")
    else:
        print("No element with ID 'registrarData' found.")
else:
    print(f"Failed to retrieve the webpage. Status code: {response.status_code}")
