gsearch_top.py

# Selenium / scraping / infrastructure stack
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import requests
import docker
import dataset

# Standard library
import time
import os
import sys
import datetime
import urllib.parse
import codecs

# Imported in the original file but not used by the current version of the script
import random
import rpyc
import googlesearch
from bs4 import BeautifulSoup
# MySQL connection via the `dataset` library; rankings are written to hhh_top_serp.
db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/hhh?charset=utf8mb4')

# Earlier variants read the keyword list from hhh_contentgap_serp or from a local file.
#cursor=db.query('SELECT kw FROM hhh.hhh_contentgap_serp where ranking is not null;')
#cursor=db.query('SELECT kw FROM hhh.hhh_contentgap_serp where kw not in (select distinct kw from hhh_contentgap_serp where id >= 155)')
kwlst = {}
#for c in cursor:
#    kwlst[c['kw']]=1

table = db['hhh_top_serp']
curdir = os.path.realpath('.')
#fr=codecs.open(curdir+os.sep+'contentgap.txt','r','utf-8')
#fr=codecs.open(curdir+os.sep+'hhh\\seo\\contentgap.txt','r','utf-8')
#fr=codecs.open('C:\\gitlab\\kw_tools\\top.csv','r','utf-8')
#lines=fr.readlines()

# Keywords to process: top terms that have not yet been ranked today.
lst = []
cursor = db.query('select term from hhh.content_top_terms where term not in (SELECT kw FROM hhh.hhh_top_serp where datediff(now(),dt) =0 and ranking is not null )')
for c in cursor:
    lst.append(c['term'])
#for l in lines:
#for l in lines[35:]:
#for l in lines[49:]:
#for l in lines[34:]:
#for l in lines[41:]:
#    lst.append(l.replace('\n',''))
# LINE Notify credentials for progress notifications.
headers = {
    "Authorization": "Bearer " + "t35vhZtWNgvDNWHc3DJh0OKll3mcB9GvC8K2EAkBug2",
    "Content-Type": "application/x-www-form-urlencoded"
}

def send_msg(kw):
    # "處理關鍵字" = "processing keyword"
    params = {"message": "處理關鍵字: " + kw}
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params)

def empty_query(q):
    # Issue a plain Google search for q; defined but not called below.
    global driver
    googleurl = 'https://www.google.com/search?q=' + urllib.parse.quote(q)
    driver.get(googleurl)
    time.sleep(3)
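# Usage sketch (assumption): send_msg() above is never called in this script; a
# natural place for it would be the top of the main loop, e.g.
#     send_msg(l)
# so LINE Notify reports each keyword as it is picked up.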
def process_query(qs, number_results=10, language_code='en', pat='hhh.com.tw'):
    """Search Google for qs and return the 0-based position of the first organic
    result whose URL contains pat, or None if it is not found on the first page
    or the page after clicking "next"."""
    global driver
    escaped_search_term = urllib.parse.quote(qs)
    # escaped_search_term = qs.replace(' ', '+')
    googleurl = 'https://www.google.com/search?q={}&num={}&hl={}'.format(
        escaped_search_term, number_results + 1, language_code)
    driver.get(googleurl)

    # First results page: organic result links sit under div.yuRUbf.
    elmts = driver.find_elements_by_xpath("//div[@class='yuRUbf']/a")
    idx = 0
    for elmt in elmts:
        try:
            href = elmt.get_attribute('href')
            print(str(idx) + ': ' + href)
            if pat in href:
                return idx
            idx += 1
        except:
            print('href exception')

    # Not found on the first page: click the "next page" link (id=pnnext).
    try:
        elmt = driver.find_element_by_xpath("//a[@id='pnnext']")
        webdriver.ActionChains(driver).move_to_element(elmt).perform()
        webdriver.ActionChains(driver).move_to_element(elmt).click().perform()
    except:
        print('pnnext exception')
        return None
    time.sleep(4)

    # Second results page.
    elmts = driver.find_elements_by_xpath("//div[@class='yuRUbf']/a")
    for elmt in elmts:
        try:
            href = elmt.get_attribute('href')
            print(str(idx) + ': ' + href)
            if pat in href:
                return idx
            idx += 1
        except:
            print('href2 exception')
    return None
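# Sketch only: WebDriverWait / By / EC are imported above but unused. An explicit
# wait like this could replace the fixed time.sleep() around pagination, assuming
# Google keeps the 'yuRUbf' class on organic result containers.
def wait_for_results(drv, timeout=10):
    return WebDriverWait(drv, timeout).until(
        EC.presence_of_all_elements_located((By.XPATH, "//div[@class='yuRUbf']/a")))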
result = []
driver = None

def restart_browser():
    # Restart the first running container on the local Docker host (assumed to be
    # the Selenium browser container) to get a clean session, then attach to the
    # Selenium hub over Remote WebDriver.
    client = docker.from_env()
    ls = client.containers.list()
    print(ls)
    ls[0].restart()
    time.sleep(10)
    # options = webdriver.ChromeOptions()
    # driver = webdriver.Chrome(desired_capabilities=options.to_capabilities())
    driver = webdriver.Remote(
        command_executor='http://127.0.0.1:4444/wd/hub',
        # command_executor='http://172.104.93.163:4444/wd/hub',
        # command_executor='http://dev2.choozmo.com:14444/wd/hub',
        # desired_capabilities=options.to_capabilities())
        desired_capabilities=DesiredCapabilities.CHROME)
    driver.set_window_size(1400, 1000)
    return driver
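# Compatibility sketch (assumption: Selenium 4+). This script uses the Selenium 3
# API; in the Selenium 4 series, desired_capabilities and the
# find_elements_by_xpath() helpers were deprecated and later removed, so the
# equivalent would be roughly:
#     options = webdriver.ChromeOptions()
#     driver = webdriver.Remote(command_executor='http://127.0.0.1:4444/wd/hub',
#                               options=options)
#     elmts = driver.find_elements(By.XPATH, "//div[@class='yuRUbf']/a")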
# Main loop: look up each keyword's ranking and record it.
for l in lst:
#for l in lst[2:]:
    if True:
    # if kwlst.get(l) is None:
        driver = restart_browser()
        # l='房間 油漆'   # leftover test keyword ("room" "paint")
        idx = process_query(l, number_results=100, language_code='zh-TW', pat='hhh.com.tw')
        if idx is None:
            print(driver.page_source)
            # Google's "unusual traffic detected from your computer network" block page.
            if '我們的系統偵測到您的電腦網路送出的流量有異常情況' in driver.page_source:
                print('banned.....')
                sys.exit()
        table.insert({'kw': l, 'ranking': idx, 'dt': datetime.datetime.now()})
        print({'kw': l, 'ranking': idx})
        db.commit()
        # time.sleep(9999)
        # time.sleep(4)
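# Usage sketch (hypothetical): after a run, today's recorded rankings can be read
# back from the hhh_top_serp table written above, e.g.
#     for row in db.query('SELECT kw, ranking FROM hhh_top_serp WHERE datediff(now(), dt) = 0'):
#         print(row['kw'], row['ranking'])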