-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscrape_webpages.py
More file actions
37 lines (33 loc) · 830 Bytes
/
scrape_webpages.py
File metadata and controls
37 lines (33 loc) · 830 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import bs4
from bs4 import BeautifulSoup
import requests
def _split_tokens(markup):
    """Split *markup* on single spaces and bucket the tokens.

    Returns a tuple ``(word_data, hreflinks)`` where ``word_data`` holds the
    purely alphabetic tokens and ``hreflinks`` holds every token containing
    the substring ``"href"``.  A token may land in both lists.
    """
    word_data = []
    hreflinks = []
    # str.split(" ") mirrors the original space-by-space scan, but also
    # processes the final token (the original loop never flushed it).
    for token in markup.split(" "):
        if token.isalpha():
            word_data.append(token)
        if "href" in token:
            hreflinks.append(token)
    return word_data, hreflinks


def scan_links():
    """Interactively fetch a Google search page or a Reddit subreddit page,
    then print the alphabetic words and href-bearing tokens found in the
    stringified ``<div>`` markup.

    Prompts on stdin for "g" (Google) or "r" (Reddit); any other answer
    re-prompts via a recursive retry.  Side effects only (network, stdin,
    stdout); returns None.
    """
    google_search = "https://www.google.com/search?q=" + "samsung"
    reddit_posts = "https://www.reddit.com/r/GalaxyS21/"
    i = input("google (g) reddit (r)")
    if i == "g":
        get_links = requests.get(google_search, timeout=5)
    elif i == "r":
        get_links = requests.get(reddit_posts, timeout=5)
    else:
        # BUG FIX: the original recursed without returning, then fell
        # through to the parsing code with `get_links` unbound (NameError).
        return scan_links()
    soup = BeautifulSoup(get_links.content, 'html.parser')
    word_data, hreflinks = _split_tokens(str(soup.find_all('div')))
    print(word_data)
    print(hreflinks)
# Dead module-level assignments `txt = ''` and `array = []` removed: neither
# name was read anywhere in the file (and `array` shadowed the stdlib module
# name).  Guard the entry point so importing this module has no side effects.
if __name__ == "__main__":
    scan_links()