scraper.py
"""
Task : #5 Keywordに対して、適切なページ(URL)を決定する
version : 0.0.3
author : da-okazaki
date : 2019.07.11
"""
import feedparser
import urllib.parse
import os
import pprint
import time
import random
input_path = "./data/keyword"
output_path = "./data/url"
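
# Not part of the original script (assumption about the intended layout):
# make sure ./data exists up front, so the open() calls below do not raise
# FileNotFoundError on a fresh checkout. Harmless if the directory exists.
os.makedirs("./data", exist_ok=True)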

while True:
    start = time.time()
    # Check whether the keyword file exists
    if os.path.exists(input_path):
        # Read the keyword from the file
        f = open(input_path)
        keyword = f.read()
        f.close()
        # Delete the file after extraction
        os.remove(input_path)
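        # Deleting the file doubles as a handshake: if it were left in place,
        # the same keyword would be re-processed on every 15-second pass.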
        # US1 (a keyword was supplied)
        if keyword != "":
            # URL-encode the keyword
            s_quote = urllib.parse.quote(keyword)
            # Fetch and parse the Google News search RSS feed
            url = "https://news.google.com/news/rss/search/section/q/" + s_quote + "/" + s_quote + "?ned=jp&hl=ja&gl=JP"
            d = feedparser.parse(url)
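            # feedparser exposes each item as an entry whose published_parsed
            # field is a time.struct_time; that is flattened into a sortable
            # timestamp string below.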
            news = list()
            for i, entry in enumerate(d.entries, 1):
                p = entry.published_parsed
                sortkey = "%04d%02d%02d%02d%02d%02d" % (p.tm_year, p.tm_mon, p.tm_mday, p.tm_hour, p.tm_min, p.tm_sec)
                tmp = {
                    "no": i,
                    "title": entry.title,
                    "link": entry.link,
                    "published": entry.published,
                    "sortkey": sortkey
                }
                news.append(tmp)
                if tmp["no"] == 1:
                    # Create the output file
                    f = open(output_path, 'w')
                    # Write the article URL
                    f.write(entry.link)
                    f.close()
                    print("keyword : " + keyword + "\n" + "Date : " + str(tmp["published"]) + "\n" + "Title : " + tmp["title"])
                    break
            news = sorted(news, key=lambda x: x['sortkey'])
            #pprint.pprint(news)
        # US2 (no keyword : pick from TOPICS)
        elif keyword == "":
            # Fetch and parse the Google News top-stories RSS feed
            url = "https://news.google.com/rss?hl=ja&gl=JP&ceid=JP:ja"
            d = feedparser.parse(url)
            news = list()
            topicRandom = random.randrange(1, 10)
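            # randrange(1, 10) yields 1..9, so a random entry among the first
            # nine top stories is chosen instead of always taking the newest.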
            for i, entry in enumerate(d.entries, 1):
                p = entry.published_parsed
                sortkey = "%04d%02d%02d%02d%02d%02d" % (p.tm_year, p.tm_mon, p.tm_mday, p.tm_hour, p.tm_min, p.tm_sec)
                tmp = {
                    "no": i,
                    "title": entry.title,
                    "link": entry.link,
                    "published": entry.published,
                    "sortkey": sortkey
                }
                news.append(tmp)
                if tmp["no"] == topicRandom:
                    # Create the output file
                    f = open(output_path, 'w')
                    # Write the article URL
                    f.write(entry.link)
                    f.close()
                    print("keyword : " + "Nothing" + "\n" + "Date : " + str(tmp["published"]) + "\n" + "Title : " + tmp["title"])
                    break
            news = sorted(news, key=lambda x: x['sortkey'])
            #pprint.pprint(news)
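            # The sorted list is only consumed by the commented-out pprint
            # debug call above; the loop breaks once the chosen entry is found.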
    else:
        print("keyword file does not exist")
    end = time.time()
    try:
        # Sleep out the remainder of the 15-second polling interval
        time.sleep(15 - (end - start))
    except ValueError:
        # time.sleep() rejects a negative argument, which happens when one
        # pass took longer than the interval itself
        print("scraper.py took 15+ seconds. Skip sleep.")