# new_100.py — Google SERP click bot (scrapes rankings, clicks target result).
  1. import random
  2. import sys
  3. import dataset
  4. from selenium import webdriver
  5. import traceback
  6. import datetime
  7. import codecs
  8. import time
  9. import urllib
  10. import argparse
  11. import schedule
  12. import logging
  13. import sys
  14. from logging.handlers import SysLogHandler
  15. import socket
  16. import pandas as pd
  17. import socket
  18. import os
# Remote syslog collector (UDP port 514) that receives this bot's events.
_LOG_SERVER = ('hhh.ptt.cx', 514)
logger = logging.getLogger('clickbot_100')
# Ship log records over UDP to the central syslog server.
handler1 = SysLogHandler(address=_LOG_SERVER,socktype=socket.SOCK_DGRAM)
logger.addHandler(handler1)
#logger.debug('[clickbot_100][清原]begin')
# Tag the startup marker with host name and PID so concurrent bot
# instances can be told apart in the aggregated log stream.
hname=socket.gethostname()
pid=str(os.getpid())
# FATAL level is used (not debug) so the marker passes any level filter;
# no logger level is configured here, so lower levels may be dropped.
logger.fatal('[clickbot_100]['+hname+']['+pid+']begin')
  27. def restart_browser():
  28. options = webdriver.ChromeOptions()
  29. options.add_argument('--headless')
  30. driver=webdriver.Chrome(options=options)
  31. driver.set_window_size(950,6000)
  32. return driver
  33. def process_one():
  34. db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
  35. lst=[]
  36. table=db['google_rank']
  37. cursor = db.query('select term from seo.selected_kw')
  38. # cursor=db.query('select term from selected_kw and term not in (SELECT distinct(keyword) FROM ig_tags.save_result where url like "%beastparadise.net%" and datediff(now(),dt)=0)')
  39. for c in cursor:
  40. lst.append(c['term'])
  41. term=random.choice(lst)
  42. print(term)
  43. logger.debug('[clickbot_100]['+term+']')
  44. driver=restart_browser()
  45. escaped_search_term=urllib.parse.quote(term)
  46. googleurl = 'https://www.google.com/search?q={}&num={}&hl={}'.format(escaped_search_term, 100,'zh-TW')
  47. print(googleurl)
  48. driver.get(googleurl)
  49. time.sleep(6)
  50. fname=term.replace(' ','_')
  51. # driver.save_screenshot('c:/tmp/seo/'+fname+'.png')
  52. df=pd.DataFrame()
  53. # driver.get_screenshot_as_file("/Users/zooeytsai/排名100.png")
  54. elmts=driver.find_elements_by_xpath("//div[@class='yuRUbf']/a")
  55. clickelmt=None
  56. cnt=1
  57. datadict={'搜尋詞':[],'結果標題':[],'結果網址':[],'結果名次':[]}
  58. for elmt in elmts:
  59. try:
  60. href=elmt.get_attribute('href')
  61. if 'taroboba-yuan.com' in href:
  62. clickelmt=elmt
  63. logger.debug('[clickbot_100]['+term+']['+str(cnt)+']')
  64. print(href)
  65. print(elmt.text)
  66. datadict['搜尋詞'].append(term)
  67. datadict['結果標題'].append(elmt.text)
  68. datadict['結果網址'].append(href)
  69. datadict['結果名次'].append(str(cnt))
  70. table.insert({'title':elmt.text,'url':href,'keyword':term,'dt':datetime.datetime.now(),'num':cnt})
  71. cnt+=1
  72. except:
  73. print('href2 exception')
  74. traceback.print_exc()
  75. if clickelmt:
  76. webdriver.ActionChains(driver).move_to_element(clickelmt).perform()
  77. webdriver.ActionChains(driver).move_to_element(clickelmt).click().perform()
  78. if len(datadict['結果標題'])<=0:
  79. print('None')
  80. driver.quit()
  81. sys.exit()
  82. df['搜尋詞']=datadict['搜尋詞']
  83. df['結果標題']=datadict['結果標題']
  84. df['結果網址']=datadict['結果網址']
  85. df['結果名次']=datadict['結果名次']
  86. # df.to_excel('/Users/zooeytsai/'+fname+".xls")
  87. df.to_excel('c:/tmp/'+fname+".xls")
  88. driver.quit()
# Always run one pass immediately at startup (before argument parsing).
process_one()
parser = argparse.ArgumentParser()
# --loop: any non-empty value keeps the bot running on a schedule.
parser.add_argument('--loop')
args = parser.parse_args()
if args.loop:
    # schedule.every(6).minutes.do(process_one)
    # Re-run roughly every 24 seconds (0.4 minutes) forever.
    schedule.every(0.4).minutes.do(process_one)
    while True:
        schedule.run_pending()
        time.sleep(1)