gnews_keyword.py

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
import time, pickle, sys, os, re, requests
import dataset
import traceback
import pandas as pd
from datetime import datetime, timedelta
from newspaper import Article
from utility import log
from bs4 import BeautifulSoup
# from ckiptagger import WS, POS, NER
# remote : http://172.17.0.2:4444

logger_ = log.init_logging('gnews', 'gnews')
current = datetime.today().strftime("%Y{y}%m{m}%d{d}").format(y='年', m='月', d='日')


def brower_start(port):
    options = webdriver.ChromeOptions()
    browser = webdriver.Remote(
        command_executor='http://127.0.0.1:' + str(port) + '/wd/hub',
        desired_capabilities=options.to_capabilities()
    )
    return browser


def build_cache(db, table):
    id_dict = []
    cursor = db.query('SELECT url FROM gnews.{};'.format(table))
    for c in cursor:
        id_dict += [c['url']]
    return id_dict


def conv_time(t):
    num = int(re.findall(r'\d+', t)[0])
    if u'秒' in t:
        s = (datetime.now() - timedelta(seconds=num)
             ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')
    elif u'分鐘' in t:
        s = (datetime.now() - timedelta(minutes=num)
             ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')
    elif u'小時' in t:
        s = (datetime.now() - timedelta(hours=num)
             ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')
    elif u'天' in t:
        s = (datetime.now() - timedelta(days=num)
             ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')
    elif u'週' in t:
        s = (datetime.now() - timedelta(days=num * 7)
             ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')
    else:
        s = t
    return s
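
# Illustrative examples of conv_time() on the relative timestamps Google News shows:
#   '5 分鐘前' -> today's date, formatted as '%Y年%m月%d日'
#   '3 天前'   -> the date three days ago, in the same format
# Strings without a recognised unit (e.g. an absolute date) are returned unchanged.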


def page_down_(driver, time_):
    for i in range(time_):
        try:
            actions = ActionChains(driver)
            actions.send_keys(Keys.PAGE_DOWN).perform()
        except:
            traceback.print_exc()
        time.sleep(0.5)


# def get_trends(q, url_table, id_cache, driver):
#     driver.get("https://www.google.com/search?q={}&tbm=nws".format(q))
#     time.sleep(3)
#     driver.refresh()
#     c = 0
#     while True:
#         time.sleep(3)
#         c += 1
#         logger_.info('page {}'.format(c))
#         print(driver.current_url)
#         # print(driver.page_source)
#         elmts = driver.find_elements_by_xpath("//div[@class='xuvV6b BGxR7d']")
#         print(elmts)
#         for elmt in elmts:
#             title, url, company = '', '', ''
#             e = elmt.find_element_by_xpath(".//div[@role='heading']")
#             title = e.text
#             # print(title)
#             url = elmt.find_element_by_xpath(".//a[@class='WlydOe']").get_attribute('href')
#             # print(url)
#             company = elmt.find_element_by_xpath(".//div[@class='CEMjEf NUnG9d']").text
#             # print(company)
#             day = elmt.find_element_by_xpath(".//div[@class='OSrXXb ZE0LJd']").text
#             day = conv_time(day)
#             # print(day)
#             print(title, url, company, day)
#             if url not in id_cache:
#                 url_table.insert({
#                     'title': title,
#                     'url': url,
#                     'keyword': q,
#                     'company': company,
#                     'day': str(day),
#                     'crawler_date': current,
#                     'page': c,
#                     '_status': 0
#                 })
#         if c > 3: break
#         next_url = driver.current_url
#         next_url = next_url.replace('start={}'.format(c-1)*10,'start={}'.format(c)*10)
#         driver.get(next_url)
#         print(next_url)
#         # try:
#         #     page_down_(driver, 3)
#         #     next_url = driver.find_element_by_xpath("//a[@id='pnnext']").get_attribute('href')
#         #     driver.get(next_url)
#         #     print(next_url)
#         #     driver.implicitly_wait(5)
#         #     ActionChains(driver).move_to_element(element).click(element).perform()
#         # except:
#         #     print('done')
#         #     break
#     logger_.info('{} news list update'.format(q))
#     return driver


def our_rule(url, company, driver):
    url_domain_list = ['買購不動產新聞台', 'HiNet 新聞社群', '好房網News', '自由時報地產天下', '經濟日報',
                       '台灣醒報 Awakening News Network', '自由時報電子報', '自由電子報市場動態', '自由財經',
                       'Bella儂儂', '康健雜誌', '台灣蘋果日報 娛樂時尚', '台灣蘋果日報', '台灣蘋果日報 動新聞',
                       '公視新聞', '公民新聞', '自由娛樂', 'HiNet生活誌 - 中華電信']
    detail_content = ""
    if url.find('hk') == -1 and url.find('hongkong') == -1 and url.find('youtube') == -1:
        if company in url_domain_list:
            driver.get(url)
            if company == '買購不動產新聞台':
                e = driver.find_elements_by_xpath(
                    ".//div[@class='content-font']")
            elif company == 'HiNet 新聞社群':
                e = driver.find_elements_by_xpath(".//div[@id='detail']")
            elif company == '好房網News':
                e = driver.find_elements_by_xpath(
                    ".//div[@itemprop='articleBody']")
            elif company == '自由時報地產天下':
                e = driver.find_elements_by_xpath(".//div[@data-desc='內文']")
            elif company == '經濟日報':
                e = driver.find_elements_by_xpath(".//div[@id='article_body']")
            elif company == '台灣醒報 Awakening News Network':
                e = driver.find_elements_by_xpath(
                    ".//div[@class='markdown-body']")
            elif company == '自由時報電子報' or company == '自由電子報市場動態' or company == '自由財經':
                e = driver.find_elements_by_xpath(".//div[@class='text']")
            elif company == 'Bella儂儂':
                e = driver.find_elements_by_xpath(".//div[@id='content_div']")
            elif company == '康健雜誌':
                e = driver.find_elements_by_xpath(
                    ".//div[@class='limitContent']")
            elif company == '台灣蘋果日報' or company == '台灣蘋果日報 動新聞':
                e = driver.find_elements_by_xpath(
                    ".//div[@class='text--desktop text--mobile article-text-size_md tw-max_width']")
            elif company == '台灣蘋果日報 娛樂時尚':
                e = driver.find_elements_by_xpath(
                    ".//p[@class='text--desktop text--mobile article-text-size_md tw-max_width']")
            elif company == '公視新聞':
                e = driver.find_elements_by_xpath(
                    ".//article[@class='post-article']")
            elif company == 'udn 房地產':
                e = driver.find_elements_by_xpath(
                    ".//div[@id='story_body_content']")
            elif company == '公民新聞':
                e = driver.find_elements_by_xpath(
                    ".//div[@class='field-items']")
            elif company == '自由娛樂':
                e = driver.find_elements_by_xpath(".//div[@class='text']")
            elif company == 'HiNet生活誌 - 中華電信':
                e = driver.find_elements_by_xpath(".//div[@id='detail']")
            for i in e:
                detail_content += i.text
    return detail_content
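
# our_rule() only has extraction rules for the whitelisted publishers above; for any
# other source, or for Hong Kong / YouTube links, it returns an empty string, which
# makes detail_crawler() fall back to content_download(). Note that the 'udn 房地產'
# branch is unreachable because that name is not in url_domain_list.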


def content_download(url):
    article = Article(url)
    article.download()
    article.parse()
    return article.text, article.publish_date
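
# content_download() is the generic fallback: newspaper's Article() downloads the
# page and strips boilerplate, and its publish_date (when found) replaces the
# relative 'day' value carried over from the search-results page.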


def detail_crawler(data, detail_table, url_table, error_table, driver):
    error_list = []
    for key, group in data.iterrows():
        url = group['url']
        print(url)
        company = group['company']
        date = group['day']
        try:
            detail_content = our_rule(url, company, driver)
            if detail_content == '':
                detail_content, date = content_download(url)
                if detail_content == '':
                    logger_.warning('{} : cannot find content'.format(url))
                    error_list += [url]
                    error_table.insert({
                        'url': url,
                        'keyword': group['keyword'],
                        'error_message': 'cannot find content',
                        'crawler_date': current
                    })
            detail_table.insert({
                'url': url,
                'keyword': group['keyword'],
                'detail_content': detail_content,
                'date': str(date),
                'company': company,
                'page': group['page'],
                'crawler_date': current
            })
            url_table.upsert({'url': url, '_status': 1}, ['url'])
            time.sleep(2)
        except Exception as e:
            error_table.insert({
                'url': url,
                'keyword': group['keyword'],
                'error_message': str(e),
                'crawler_date': current
            })
    return driver


def get_next_job(db, table, query_key):
    result = db.query("select * from gnews.{} where _status=0 and keyword='{}' and crawler_date='{}'".format(table, query_key, current))
    url_pd = pd.DataFrame([dict(i) for i in result])
    return url_pd
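
# get_next_job() returns today's queued rows for this keyword that have not been
# crawled yet (_status=0); detail_crawler() upserts _status=1 once an article's
# content has been stored.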


def get_trends(q, url_table, id_cache, driver, url, c):
    print(url)
    driver.get(url)
    time.sleep(3)
    print(driver.current_url)
    # print(driver.page_source)
    elmts = driver.find_elements_by_xpath("//div[@class='xuvV6b BGxR7d']")
    print(elmts)
    for elmt in elmts:
        title, url, company = '', '', ''
        e = elmt.find_element_by_xpath(".//div[@role='heading']")
        title = e.text
        # print(title)
        url = elmt.find_element_by_xpath(".//a[@class='WlydOe']").get_attribute('href')
        # print(url)
        company = elmt.find_element_by_xpath(".//div[@class='CEMjEf NUnG9d']").text
        # print(company)
        day = elmt.find_element_by_xpath(".//div[@class='OSrXXb ZE0LJd']").text
        day = conv_time(day)
        # print(day)
        print(title, url, company, day)
        if url not in id_cache:
            url_table.insert({
                'title': title,
                'url': url,
                'keyword': q,
                'company': company,
                'day': str(day),
                'crawler_date': current,
                'page': c,
                '_status': 0
            })
    # next_url = driver.current_url
    # next_url = next_url.replace('start={}'.format(c-1)*10,'start={}'.format(c)*10)
    # driver.get(next_url)
    # print(next_url)
    next_url = driver.find_element_by_xpath("//a[@id='pnnext']").get_attribute('href')
    logger_.info('{} news list update'.format(q))
    return driver, next_url
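
# get_trends() scrapes one Google News results page: every result URL not already in
# id_cache is inserted into url_table with _status=0, and the href of the '#pnnext'
# ("next page") link is returned so main() can fetch the following page. If there is
# no next page, find_element_by_xpath raises NoSuchElementException.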


def main():
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
        print('restart docker p{}'.format(port))
        os.system('sudo docker container restart p' + str(port))
        time.sleep(8)
        keyword = sys.argv[2]
        print(port)
        driver = brower_start(port)
        db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/gnews?charset=utf8mb4')
        url_table_name = 'url_list2'
        url_table = db[url_table_name]
        detail_table = db['gnews_detail2']
        error_table = db['error_list']
        query_key = keyword
        logger_.info('{} start...'.format(query_key))
        # find new news url
        id_cache = build_cache(db, url_table_name)
        url = "https://www.google.com/search?q={}&tbm=nws".format(query_key)
        # url = "https://www.google.com"
        for i in range(3):
            logger_.info('page {}'.format(i + 1))
            driver, url = get_trends(query_key, url_table, id_cache, driver, url, i)
            time.sleep(5)
        url_pd = get_next_job(db, url_table_name, query_key)
        logger_.info('find {} news...'.format(len(url_pd)))
        driver = detail_crawler(url_pd, detail_table, url_table, error_table, driver)
        logger_.info('{} news description update'.format(query_key))
        db.close()
        driver.close()


if __name__ == "__main__":
    main()