-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathurlscraper.py
74 lines (58 loc) · 1.98 KB
/
urlscraper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import base64
import pandas as pd
import streamlit as st
from newspaper import Article
import uuid
from tqdm import tqdm
from urllib.parse import urlparse
# Function to scrape articles from URLs with a progress bar
def scrape_articles_from_urls_with_progress(url_file):
    """Scrape every URL listed in *url_file* and return the results as a DataFrame.

    Reads one URL per line (blank lines are skipped), downloads and parses each
    article with newspaper3k, and drives a Streamlit progress bar while doing so.
    A URL that fails to download or parse produces a placeholder row with 'N/A'
    fields, so the output always contains one row per input URL.

    Parameters
    ----------
    url_file : str
        Path to a text file containing one article URL per line.

    Returns
    -------
    pandas.DataFrame
        Columns: 'Datetime', 'Title', 'Text', 'URL', 'TextID', 'Publication'.
    """
    rows = []
    # Skip blank lines so we don't waste a scrape attempt (and an N/A row) on "".
    with open(url_file, 'r', encoding='utf-8') as file:
        url_list = [line.strip() for line in file if line.strip()]
    # Create a placeholder for the progress bar
    progress_bar = st.progress(0)
    # Use st.empty() to create a container for updates
    progress_text = st.empty()
    total = len(url_list)
    # Loop through URLs
    for i, url in enumerate(url_list):
        try:
            a = Article(url, language='id')
            a.download()
            a.parse()
            # Remove line breaks from the 'Text' column so each article is a
            # single flat string.
            text = a.text.replace('\n', ' ')
            unique_identifier = str(uuid.uuid4())[:16]
            # Extract only the root domain from the URL, e.g.
            # "news.example.com" -> "example.com". Parse once and guard against
            # dotless hosts ("localhost"), which previously raised IndexError
            # and silently dropped an already-parsed article into the N/A path.
            netloc_parts = urlparse(url).netloc.split('.')
            domain = '.'.join(netloc_parts[-2:]) if len(netloc_parts) >= 2 else netloc_parts[0]
            rows.append({
                'Datetime': a.publish_date,
                'Title': a.title,
                'Text': text,
                'URL': url,
                'TextID': unique_identifier,
                'Publication': domain,
            })
        except Exception as e:
            # Best-effort scraping: log the failure and keep the output aligned
            # with the input URL list via a placeholder row.
            print(e)
            rows.append({
                'Datetime': 'N/A',
                'Title': 'N/A',
                'Text': 'N/A',
                'URL': url,
                'TextID': 'N/A',
                'Publication': 'N/A',
            })
        # Update progress bar and text
        progress_percentage = (i + 1) / total
        progress_bar.progress(progress_percentage)
        progress_text.text(f"Scraping progress: {int(progress_percentage * 100)}%")
    # Close the progress bar
    progress_bar.empty()
    df = pd.DataFrame(rows)
    return df