# crawl_web.py

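"""Scrape Google search results with Selenium and store each hit in MySQL.

The script builds a Google search URL for a keyword, collects the title and
URL of every organic result on the page (optionally paging through results
via the "next" link), and inserts each hit into the `kw_url_search_result`
table through the `dataset` library.
"""
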
import traceback
import time
import os
import sys
import datetime
import urllib.parse
import codecs
import random

# Note: several of these imports are only needed by the commented-out code paths below.
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import requests
import docker
import dataset
import html2text
# import rpyc
# import googlesearch

def process_one(driver):
    """Collect the title and URL of each organic result on the current results page."""
    lst = []
    # 'yuRUbf' is the class Google currently wraps each result link in; it changes over time.
    elmts = driver.find_elements_by_xpath("//div[@class='yuRUbf']/a")
    for elmt in elmts:
        try:
            href = elmt.get_attribute('href')
            # print(href)
            txt = elmt.text.split('\n')
            print(txt[0])
            lst.append({'title': txt[0], 'url': href})
        except Exception:
            print('href2 exception')
            traceback.print_exc()
    return lst
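
# Note: the *_by_xpath helpers used here exist in Selenium 3; on Selenium 4 the
# equivalent calls are driver.find_element(By.XPATH, ...) and
# driver.find_elements(By.XPATH, ...), with By already imported above.
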
def process_query(driver, qs, number_results=10, language_code='zh-TW', enable_next=True):
    """Run one Google search for `qs` and return a list of {'title', 'url'} dicts.

    When enable_next is True, keep clicking Google's "next page" link until it disappears.
    """
    escaped_search_term = urllib.parse.quote(qs)
    googleurl = 'https://www.google.com/search?q={}&num={}&hl={}'.format(escaped_search_term, number_results + 1, language_code)
    print(googleurl)
    driver.get(googleurl)
    time.sleep(3)
    totallst = []
    while True:
        lst = process_one(driver)
        totallst += lst
        try:
            if enable_next:
                time.sleep(3)
                # 'pnnext' is the id Google gives the "next page" link.
                elmt = driver.find_element_by_xpath("//a[@id='pnnext']")
                webdriver.ActionChains(driver).move_to_element(elmt).perform()
                webdriver.ActionChains(driver).move_to_element(elmt).click().perform()
            else:
                break
        except Exception:
            traceback.print_exc()
            print('pnnext exception')
            break
        time.sleep(1.5)
    return totallst

result = []
driver = None

def restart_browser():
    """Start a fresh Chrome session (optionally via a remote/Dockerised Selenium node)."""
    # os.system('docker container restart p4444')
    # time.sleep(10)
    options = webdriver.ChromeOptions()
    # options.add_argument("--proxy-server=http://80.48.119.28:8080")
    # driver = webdriver.Chrome(executable_path='/Users/zooeytsai/Downloads/chromedriver', options=options)
    driver = webdriver.Chrome(desired_capabilities=options.to_capabilities())
    # driver = webdriver.Remote(
    #     command_executor='http://127.0.0.1:4444/wd/hub',
    #     desired_capabilities=options.to_capabilities())
    #     # desired_capabilities=DesiredCapabilities.CHROME)
    driver.set_window_size(1400, 1000)
    return driver
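
# Script body: open Chrome, run one Google search for the target keyword,
# and insert every result row into the MySQL table below.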
db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
table = db['kw_url_search_result']
driver = restart_browser()
lst = process_query(driver, '班尼斯 site:mobile01.com', number_results=50, language_code='zh-TW', enable_next=False)
for l in lst:
    table.insert(l)
print(lst)
# print(html2text.html2text("<p><strong>Zed's</strong> dead baby, <em>Zed's</em> dead.</p>"))