# gnews_keyword.py — Google News keyword crawler (Selenium + dataset + newspaper3k)
  1. from selenium import webdriver
  2. from selenium.webdriver.common.by import By
  3. from selenium.webdriver.common.action_chains import ActionChains
  4. from selenium.webdriver.support.wait import WebDriverWait
  5. import time, pickle, sys, os, re, time, requests
  6. import dataset
  7. import pandas as pd
  8. from datetime import datetime, timedelta
  9. from newspaper import Article
  10. from utility import log
  11. from bs4 import BeautifulSoup
  12. # from ckiptagger import WS, POS, NER
  13. # remote : http://172.17.0.2:4444
# Module-wide logger; init_logging comes from the project-local utility.log module.
logger_ = log.init_logging('gnews', 'gnews')
# Today's date rendered as e.g. 2024年01月05日 — stored as `crawler_date` in every DB row.
current = datetime.today().strftime("%Y{y}%m{m}%d{d}").format(y='年', m='月', d='日')
  16. def brower_start(port):
  17. options = webdriver.ChromeOptions()
  18. browser = webdriver.Remote(
  19. command_executor='http://127.0.0.1:'+str(port)+'/wd/hub',
  20. desired_capabilities=options.to_capabilities()
  21. )
  22. return browser
  23. def build_cache(db, table):
  24. id_dict=[]
  25. cursor = db.query('SELECT url FROM gnews.{};'.format(table))
  26. for c in cursor:
  27. id_dict += [c['url']]
  28. return id_dict
  29. def conv_time(t):
  30. min = int(re.findall('\d+', t)[0])
  31. if u'秒' in t:
  32. s = (datetime.now() - timedelta(seconds=min)
  33. ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')
  34. elif u'分鐘' in t:
  35. s = (datetime.now() - timedelta(minutes=min)
  36. ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')
  37. elif u'小時' in t:
  38. s = (datetime.now() - timedelta(hours=min)
  39. ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')
  40. elif u'天' in t:
  41. s = (datetime.now() - timedelta(days=min)
  42. ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')
  43. elif u'週' in t:
  44. s = (datetime.now() - timedelta(days=min*7)
  45. ).strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')
  46. else:
  47. s = t
  48. return s
  49. def get_trends(q, url_table, id_cache, driver):
  50. driver.get("https://www.google.com/search?q={}&tbm=nws".format(q))
  51. time.sleep(3)
  52. # print(driver.page_source)
  53. # click tool
  54. # element = driver.find_element(By.ID, "hdtb-tls")
  55. # driver.implicitly_wait(5)
  56. # ActionChains(driver).move_to_element(element).click(element).perform()
  57. # click time
  58. # element = driver.find_elements(By.XPATH, "//div[@class='KTBKoe']")
  59. # driver.implicitly_wait(5)
  60. # ActionChains(driver).move_to_element(element[1]).click(element[1]).perform()
  61. # click time
  62. # element = driver.find_element(By.XPATH,"//div[@id='lb']")
  63. # ele = element.find_elements(By.XPATH,"//g-menu-item[@class='ErsxPb']")
  64. # for e in ele:
  65. # if e.text == '過去 24 小時':
  66. # print(e.text)
  67. # driver.implicitly_wait(5)
  68. # ActionChains(driver).move_to_element(e).click(e).perform()
  69. # break
  70. c = 0
  71. while True:
  72. time.sleep(3)
  73. c += 1
  74. logger_.info('page {}'.format(c))
  75. print(driver.page_source)
  76. elmts = driver.find_elements_by_xpath("//div[@class='xuvV6b BGxR7d']")
  77. print(elmts)
  78. for elmt in elmts:
  79. title, url, company = '', '', ''
  80. e = elmt.find_element_by_xpath(".//div[@role='heading']")
  81. title = e.text
  82. # print(title)
  83. url = elmt.find_element_by_xpath(".//a[@class='WlydOe']").get_attribute('href')
  84. # print(url)
  85. company = elmt.find_element_by_xpath(".//div[@class='CEMjEf NUnG9d']").text
  86. # print(company)
  87. day = elmt.find_element_by_xpath(".//div[@class='OSrXXb ZE0LJd']").text
  88. day = conv_time(day)
  89. # print(day)
  90. print(title, url, company, day)
  91. if url not in id_cache:
  92. url_table.insert({
  93. 'title': title,
  94. 'url': url,
  95. 'keyword': q,
  96. 'company': company,
  97. 'day': str(day),
  98. 'crawler_date': current,
  99. 'page': c,
  100. '_status': 0
  101. })
  102. if c > 3: break
  103. try:
  104. element = driver.find_element_by_xpath("//a[@id='pnnext']")
  105. driver.implicitly_wait(5)
  106. ActionChains(driver).move_to_element(element).click(element).perform()
  107. except:
  108. print('done')
  109. break
  110. logger_.info('{} news list update'.format(q))
  111. return driver
  112. def our_rule(url, company, driver):
  113. url_domain_list = ['買購不動產新聞台', 'HiNet 新聞社群', '好房網News', '自由時報地產天下', '經濟日報',
  114. '台灣醒報 Awakening News Network', '自由時報電子報', '自由電子報市場動態', '自由財經',
  115. 'Bella儂儂', '康健雜誌', '台灣蘋果日報 娛樂時尚', '台灣蘋果日報', '台灣蘋果日報 動新聞',
  116. '公視新聞', '公民新聞', '自由娛樂', 'HiNet生活誌 - 中華電信']
  117. detail_content = ""
  118. if url.find('hk') == -1 and url.find('hongkong') == -1 and url.find('youtube') == -1:
  119. if company in url_domain_list:
  120. driver.get(url)
  121. if company == '買購不動產新聞台':
  122. e = driver.find_elements_by_xpath(
  123. ".//div[@class='content-font']")
  124. elif company == 'HiNet 新聞社群':
  125. e = driver.find_elements_by_xpath(".//div[@id='detail']")
  126. elif company == '好房網News':
  127. e = driver.find_elements_by_xpath(
  128. ".//div[@itemprop='articleBody']")
  129. elif company == '自由時報地產天下':
  130. e = driver.find_elements_by_xpath(".//div[@data-desc='內文']")
  131. elif company == '經濟日報':
  132. e = driver.find_elements_by_xpath(".//div[@id='article_body']")
  133. elif company == '台灣醒報 Awakening News Network':
  134. e = driver.find_elements_by_xpath(
  135. ".//div[@class='markdown-body']")
  136. elif company == '自由時報電子報' or company == '自由電子報市場動態' or company == '自由財經':
  137. e = driver.find_elements_by_xpath(".//div[@class='text']")
  138. elif company == 'Bella儂儂':
  139. e = driver.find_elements_by_xpath(".//div[@id='content_div']")
  140. elif company == '康健雜誌':
  141. e = driver.find_elements_by_xpath(
  142. ".//div[@class='limitContent']")
  143. elif company == '台灣蘋果日報' or company == '台灣蘋果日報 動新聞':
  144. e = driver.find_elements_by_xpath(
  145. ".//div[@class='text--desktop text--mobile article-text-size_md tw-max_width']")
  146. elif company == '台灣蘋果日報 娛樂時尚':
  147. e = driver.find_elements_by_xpath(
  148. ".//p[@class='text--desktop text--mobile article-text-size_md tw-max_width']")
  149. elif company == '公視新聞':
  150. e = driver.find_elements_by_xpath(
  151. ".//article[@class='post-article']")
  152. elif company == 'udn 房地產':
  153. e = driver.find_elements_by_xpath(
  154. ".//div[@id='story_body_content']")
  155. elif company == '公民新聞':
  156. e = driver.find_elements_by_xpath(
  157. ".//div[@class='field-items']")
  158. elif company == '自由娛樂':
  159. e = driver.find_elements_by_xpath(".//div[@class='text']")
  160. elif company == 'HiNet生活誌 - 中華電信':
  161. e = driver.find_elements_by_xpath(".//div[@id='detail']")
  162. for i in e:
  163. detail_content += i.text
  164. return detail_content
  165. def content_download(url):
  166. article = Article(url)
  167. article.download()
  168. article.parse()
  169. return article.text, article.publish_date
  170. def detail_crawler(data, detail_table, url_table, error_table, driver):
  171. error_list = []
  172. for key, group in data.iterrows():
  173. url = group['url']
  174. print(url)
  175. company = group['company']
  176. date = group['day']
  177. try:
  178. detail_content = our_rule(url, company, driver)
  179. if detail_content == '':
  180. detail_content, date = content_download(url)
  181. if detail_content == '':
  182. logger_.warning('{} : cannot find content'.format(url))
  183. error_list += [url]
  184. error_table.insert({
  185. 'url':url,
  186. 'keyword': group['keyword'],
  187. 'error_message': 'cannot find conten',
  188. 'crawler_date': current
  189. })
  190. detail_table.insert({
  191. 'url': url,
  192. 'keyword': group['keyword'],
  193. 'detail_content': detail_content,
  194. 'date': str(date),
  195. 'company': company,
  196. 'page': group['page'],
  197. 'crawler_date': current
  198. })
  199. url_table.upsert({'url':url,'_status':1},['url'])
  200. time.sleep(2)
  201. except Exception as e:
  202. error_table.insert({
  203. 'url':url,
  204. 'keyword': group['keyword'],
  205. 'error_message': str(e),
  206. 'crawler_date': current
  207. })
  208. return driver
  209. def get_next_job(db, table, query_key):
  210. result = db.query("select * from gnews.{} where _status=0 and keyword='{}' and crawler_date='{}'".format(table, query_key, current))
  211. url_pd = pd.DataFrame([dict(i) for i in result])
  212. return url_pd
  213. def main():
  214. if len(sys.argv) > 1 :
  215. port = int(sys.argv[1])
  216. print('restart docker pw{}'.format(port))
  217. os.system('sudo docker container restart pw'+str(port))
  218. time.sleep(8)
  219. keyword = sys.argv[2]
  220. driver = brower_start(port)
  221. db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/gnews?charset=utf8mb4')
  222. url_table_name = 'url_list2'
  223. url_table = db[url_table_name]
  224. detail_table = db['gnews_detail2']
  225. error_table = db['error_list']
  226. query_key = keyword
  227. logger_.info('{} start...'.format(query_key))
  228. # find new news url
  229. id_cache = build_cache(db, url_table_name)
  230. driver = get_trends(query_key, url_table, id_cache, driver)
  231. time.sleep(5)
  232. url_pd = get_next_job(db, url_table_name, query_key)
  233. logger_.info('find {} news...'.format(len(url_pd)))
  234. driver = detail_crawler(url_pd, detail_table, url_table, error_table, driver)
  235. logger_.info('{} news description update'.format(query_key))
  236. db.close()
  237. driver.close()
  238. if __name__ == "__main__":
  239. main()