Merge branch 'master' of http://git.choozmo.com:3000/choozmo/kw_tools

Jared 2 years ago
parent
commit
0e149e6c12

+ 14 - 11
INNNews/general_clickbot.py

@@ -8,6 +8,7 @@ import urllib.parse
 from selenium.webdriver.support.ui import WebDriverWait
 from selenium.webdriver.common.by import By
 from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.keys import Keys
 from selenium.webdriver.support import expected_conditions as EC
 import codecs
 import random
@@ -22,8 +23,6 @@ import fire
 #pymysql.install_as_MySQLdb()
 
 
-db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
-table=db['general_log']
 driver = None
 
 
@@ -48,19 +47,25 @@ def empty_query(q):
 
 
 def process_query(qs):
+    db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
+    table=db['general_log']
     q=qs[0]
     domain=qs[1]
     global driver
-    googleurl = 'https://www.google.com/search?q={}&num={}&hl={}'.format(urllib.parse.quote(q), 100,'zh-TW')
-    print(googleurl)
+    googleurl = 'https://www.google.com/?num=100'
     driver.get(googleurl)
     time.sleep(6)
+    send_kw_elmt = driver.find_element(By.XPATH, '/html/body/div[1]/div[3]/form/div[1]/div[1]/div[1]/div/div[2]/input')
+    send_kw_elmt.send_keys(q)
+    time.sleep(3)
+    send_kw_elmt.send_keys(Keys.ENTER)
+    time.sleep(6)
 
-    elmts=driver.find_elements_by_xpath("//div[@class='yuRUbf']/a")
+    elmts=driver.find_elements(By.XPATH,"//div[@class='yuRUbf']/a")
 
     idx=1
     ranking=-1
-    print(len(elmts))
+    print('網頁數量',len(elmts))
 #    driver.save_screenshot('c:/tmp/test.png')
     if 'site' in q:
         href = elmts[0].get_attribute('href')
@@ -80,8 +85,8 @@ def process_query(qs):
         if len(txt)>10:
             if domain in href:
                 print('clicked....')
-                print(href)
-                print(txt)
+                print('點擊網址',href)
+                print('標題',txt)
                 print("ranking", idx)
                 table.insert({'kw':q,'domain':domain,'ranking':idx,'title':txt,'url':href,'dt':datetime.datetime.now()})
                 webdriver.ActionChains(driver).move_to_element(elmt).perform()
@@ -89,7 +94,7 @@ def process_query(qs):
                 time.sleep(5)
                 break
         idx+=1
-
+    db.close()
 def run_once(q):
     global driver
     result=[]
@@ -108,7 +113,6 @@ def run_once(q):
     driver.delete_all_cookies()
     driver.set_window_size(1400,1000)
 
-    print('到此')
     process_query(q)
     time.sleep(3)
     driver.quit()
@@ -126,7 +130,6 @@ def run_once(q):
 class JParams(object):
 
   def get(self, kw,domain,port):
-    print('關鍵字',kw)
     run_once( (kw,domain,port)   )
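The hunks above replace the direct search?q=... URL with opening the Google homepage and typing the query, and they move the dataset connection into process_query so it is opened and closed per call. A minimal sketch of that keyboard-driven flow, assuming the name="q" search box (the same input that website_clickjobs/bennisclickjob.py in this commit locates via XPath) instead of the long absolute XPath:

    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.common.keys import Keys
    import time

    def google_search(driver, query):
        """Open google.com, type the query, submit it, and return the organic result links."""
        driver.get('https://www.google.com/?num=100')
        time.sleep(6)
        box = driver.find_element(By.NAME, 'q')  # assumed alternative to the absolute XPath above
        box.send_keys(query)
        time.sleep(3)
        box.send_keys(Keys.ENTER)
        time.sleep(6)
        # Selenium 4 locator API; the old find_elements_by_xpath helper is gone as of 4.3
        return driver.find_elements(By.XPATH, "//div[@class='yuRUbf']/a")

    # usage sketch: links = google_search(webdriver.Chrome(), '關鍵字')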
 
 

+ 3 - 3
INNNews/run_sheet_2.py

@@ -22,7 +22,7 @@ def run_once(pport, dockername):
     db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
     lst = []
     
-    cursor = db.query('select term, domain from seo.selected_kw where client!="毛怪"')
+    cursor = db.query('select term, domain from seo.selected_kw where client not in ("毛怪")')
     for c in cursor:
         lst.append([c['term'], c['domain']])
     
@@ -38,7 +38,7 @@ def run_once(pport, dockername):
     if intval == -1:
         print('-1')
         sys.exit()
-
+    db.close()
 
 def run_specific(pport, dockername):
     db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
@@ -59,7 +59,7 @@ def run_specific(pport, dockername):
     if intval == -1:
         print('-1')
         sys.exit()
-
+    db.close()
 
 class JParams(object):
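Both hunks above switch to a connect-per-call pattern with an explicit db.close(). A minimal sketch of the same query wrapped in contextlib.closing, shown only as an illustration, so the connection is also released if the query raises:

    import dataset
    from contextlib import closing

    def load_selected_keywords():
        """Fetch (term, domain) pairs, skipping the '毛怪' client, and always close the connection."""
        url = 'mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4'
        with closing(dataset.connect(url)) as db:
            cursor = db.query('select term, domain from seo.selected_kw where client not in ("毛怪")')
            return [[c['term'], c['domain']] for c in cursor]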
     

+ 5 - 5
INNNews/run_sns.py

@@ -22,7 +22,7 @@ def run_once(pport, dockername):
     db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
     lst = []
     
-    cursor = db.query('select term, url, client from seo.sns_kw')
+    cursor = db.query('select * from seo.sns_kw')
     for c in cursor:
         lst.append([c['term'], c['url'], c['client'], c['domain']])
     
@@ -40,20 +40,20 @@ def run_once(pport, dockername):
     if intval == -1:
         print('-1')
         sys.exit()
-
+    db.close()
 
 class JParams(object):
     
     def get(self, port=9222):
         while True:
             try:
-                os.system('docker container restart tiny6')
+                os.system('docker container restart tiny7')
                 time.sleep(1)
-                run_once(9927, 'tiny6')
+                run_once(9928, 'tiny7')
                 time.sleep(20)
                 break
             except:
-                os.system('docker container restart tiny6')
+                os.system('docker container restart tiny7')
                 time.sleep(15)
 
 if __name__ == '__main__':

+ 12 - 10
INNNews/sns_clickbot.py

@@ -8,6 +8,7 @@ import urllib.parse
 from selenium.webdriver.support.ui import WebDriverWait
 from selenium.webdriver.common.by import By
 from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.keys import Keys
 from selenium.webdriver.support import expected_conditions as EC
 import codecs
 import random
@@ -22,8 +23,6 @@ import fire
 #pymysql.install_as_MySQLdb()
 
 
-db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
-table=db['sns_log']
 driver = None
 
 
@@ -48,21 +47,24 @@ def empty_query(q):
 
 
 def process_query(qs):
+    db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
+    table=db['sns_log']
     q=qs[0]
     url=qs[1]
     client=qs[2]
     domain=qs[3]
     global driver
-    escaped_search_term = urllib.parse.quote(q)
-    googleurl = 'https://www.google.com/search?q={}&num={}&hl={}'.format(escaped_search_term, 100, 'zh-TW')
-    print(googleurl)
+    googleurl = 'https://www.google.com/?num=100'
     driver.get(googleurl)
+    time.sleep(6)
+    send_kw_elmt = driver.find_element(By.XPATH, '/html/body/div[1]/div[3]/form/div[1]/div[1]/div[1]/div/div[2]/input')
+    send_kw_elmt.send_keys(q)
+    time.sleep(3)
+    send_kw_elmt.send_keys(Keys.ENTER)
+    time.sleep(6)
     time.sleep(10)
-    # fname=term.replace(' ','_')
-    # driver.save_screenshot('c:/tmp/seo/'+fname+'.png')
-    # df=pd.DataFrame()
 
-    elmts = driver.find_elements_by_xpath("//div[@class='yuRUbf']/a")
+    elmts = driver.find_elements(By.XPATH,"//div[@class='yuRUbf']/a")
     print('網頁數量',len(elmts))
     idx = 1
     for elmt in elmts:
@@ -80,7 +82,7 @@ def process_query(qs):
                 time.sleep(5)
                 break
         idx+=1
-
+    db.close()
 def run_once(q):
     global driver
     s = Service('/root/driver/chromedriver')

+ 5 - 3
SEO/news_clickbot.py

@@ -53,10 +53,12 @@ def restart_browser(pport):
 
 def process_one(pport):
     db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
-    lst = ['好睡王 引新聞','好睡王 Yahoo','好睡王 HiNet','好睡王 PCHOME','好睡王 蕃新聞','好睡王 新浪','好睡王 台北郵報','好睡王 LIFE.tw','好睡王 match生活網','好睡王炎炎夏日 POPDAILY','好睡王 LINE TODAY']
+    lst = []
     table = db['news_log']
-
-    for term in lst[7::]:
+    cursor = db.query("select * from seo.news_kw")
+    for c in cursor:
+        lst.append([c['term']])
+    for term in lst:
         print(term)
         logger.debug('[clickbot_100][' + term + ']')
         driver = restart_browser(pport)
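The hunk above replaces the hard-coded 好睡王 list with a query against seo.news_kw, but each appended entry is a one-element list, so the string concatenation in logger.debug would likely raise a TypeError. A minimal sketch that collects plain strings instead, assuming the table and column names shown in the hunk:

    import dataset

    def load_news_terms():
        """Return news keywords as plain strings so they concatenate cleanly into log messages."""
        db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
        terms = [c['term'] for c in db.query('select * from seo.news_kw')]
        db.close()
        return terms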

+ 100 - 0
choozmo/ads_csv_gdn.py

@@ -0,0 +1,100 @@
+import csv
+import sys
+import codecs
+import pandas as pd
+
+with codecs.open('/Users/zooeytsai/Documents/220628有夠讚GDN.csv', 'r', 'utf-16') as csvfile:
+    spamreader = csv.reader(csvfile, delimiter='\t', quotechar='|')
+    # df = pd.read_csv('/Users/zooeytsai/Documents/220628有夠讚GDN.csv', encoding='utf16')
+    kwdict = {}
+    addict = {}
+    d = {}
+    head = True
+    for row in spamreader:
+        # print(row)
+        if head:
+            head = False
+            continue
+        ll = len(row)
+        campaign = row[0]
+        adgroup = row[19]
+        # print(adgroup)
+        kw = row[47]
+
+        # if len(kw) > 0:
+            # print(campaign)
+            # print(adgroup)
+        if kwdict.get(adgroup) is None:
+            kwdict[adgroup] = []
+        if addict.get(adgroup) is None:
+            addict[adgroup] = []
+        if d.get(adgroup) is None:
+            d[adgroup] = []
+        kwdict[adgroup].append(kw)
+
+        for i in range(59, 65): #加了Long headline
+            hline = row[i]
+            # print(hline)
+            if len(hline) > 0:
+                print(addict[adgroup])
+                addict[adgroup].append(hline)
+        for i in range(65, 67):
+            hline = row[i]
+            # print(i, hline)
+            if len(hline) > 0:
+                d[adgroup].append(hline)
+        # print(addict)
+fw = codecs.open('/Users/zooeytsai/Documents/有夠讚GDN廣告datastudio.csv', 'w', 'utf-8')
+fw.write("活動,群組,關鍵字,廣告標題,廣告內容")
+for k, v in kwdict.items():
+    kwlen = len(v)
+    adlen = len(addict[k])
+    totlen = max(kwlen, adlen)
+    for i in range(totlen):
+        print(campaign)
+        try:
+            kw = v[i]
+        except:
+            kw = ' '
+        try:
+            ad = addict[k][i]
+            # print(ad)
+        except:
+            ad = ' '
+        try:
+            ad1 = d[k][i]
+            # print(ad)
+        except:
+            ad1 = ' '
+        data = "\n" + campaign + "," + k + "," + kw + "," + ad + "," + ad1
+        fw.write(data)
+
+fw.close()
+
+#    for k,v in addict.items():
+#        print('rectangle '+k+'_ad'+' {')
+#        print('card '+k+'廣告'+' [')
+
+#        for itm in v:
+#            print(itm)
+#        print(']')
+#        print('}')
+
+
+#        print(campaign+' -[#black]-> '+k+'_ad')
+
+
+#    print(kwdict)
+#    print(addict)
+
+
+#    for row in spamreader:
+#        ll=len(row)
+#
+#        for i in range(ll):
+#            print(i)
+#            print(row[i])
+#        break
+
+#
+#    sys.exit()
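In the output loop above, campaign still holds whatever value the final input row assigned, so every output row appears to carry that one campaign name. A minimal sketch of the same per-ad-group pairing, assuming a hypothetical campaigns dict filled during the read loop (e.g. campaigns[adgroup] = campaign) and using itertools.zip_longest plus csv.writer to pad the unequal lists and quote fields properly:

    import csv
    import codecs
    from itertools import zip_longest

    def write_datastudio_csv(campaigns, kwdict, addict, d, out_path):
        """Pair keywords, headlines and descriptions per ad group, padding the shorter lists with blanks."""
        with codecs.open(out_path, 'w', 'utf-8') as fw:
            writer = csv.writer(fw)
            writer.writerow(['活動', '群組', '關鍵字', '廣告標題', '廣告內容'])
            for adgroup, kws in kwdict.items():
                for kw, headline, desc in zip_longest(kws, addict[adgroup], d[adgroup], fillvalue=' '):
                    writer.writerow([campaigns.get(adgroup, ' '), adgroup, kw, headline, desc])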

+ 96 - 0
choozmo/ads_csv_local.py

@@ -0,0 +1,96 @@
+import csv
+import sys
+import codecs
+import pandas as pd
+
+with codecs.open('/Users/zooeytsai/Documents/220704有夠讚地標廣告.csv', 'r', 'utf-16') as csvfile:
+    spamreader = csv.reader(csvfile, delimiter='\t', quotechar='|')
+    # df = pd.read_csv('/Users/zooeytsai/Documents/220628有夠讚GDN.csv', encoding='utf16')
+    kwdict = {}
+    addict = {}
+    campagindict = {}
+    d = {}
+    head = True
+    for row in spamreader:
+        # print(row)
+        if head:
+            head = False
+            continue
+        ll = len(row)
+        campaign = row[0]
+        adgroup = row[18]
+        # print(adgroup)
+        # print(adgroup)
+        # kw = row[47]
+
+        # if len(kw) > 0:
+            # print(campaign)
+            # print(adgroup)
+        if kwdict.get(adgroup) is None:
+            kwdict[adgroup] = []
+        if addict.get(adgroup) is None:
+            addict[adgroup] = []
+        if d.get(adgroup) is None:
+            d[adgroup] = []
+        # kwdict[adgroup].append(kw)
+
+        for i in range(54, 59): #headline
+            hline = row[i]
+            # print(hline)
+            if len(hline) > 0:
+                # print(addict[adgroup])
+                addict[adgroup].append(hline)
+        for i in range(59, 64): #Description
+            hline = row[i]
+            # print(i, hline)
+            if len(hline) > 0:
+                d[adgroup].append(hline)
+    # print(addict)
+fw = codecs.open('/Users/zooeytsai/Documents/有夠讚地標廣告datastudio.csv', 'w', 'utf-8')
+fw.write("群組,廣告標題,廣告內容")
+
+
+for k,v in addict.items():
+    for i in range(len(v)):
+        try:
+            ad = addict[k][i]
+            # print(ad)
+        except:
+            ad = ' '
+        try:
+            ad1 = d[k][i]
+            print(ad1)
+        except:
+            ad1 = ' '
+        data = "\n" + k + "," + ad + "," + ad1
+        fw.write(data)
+
+fw.close()
+
+#    for k,v in addict.items():
+#        print('rectangle '+k+'_ad'+' {')
+#        print('card '+k+'廣告'+' [')
+
+#        for itm in v:
+#            print(itm)
+#        print(']')
+#        print('}')
+
+
+#        print(campaign+' -[#black]-> '+k+'_ad')
+
+
+#    print(kwdict)
+#    print(addict)
+
+
+#    for row in spamreader:
+#        ll=len(row)
+#
+#        for i in range(ll):
+#            print(i)
+#            print(row[i])
+#        break
+
+#
+#    sys.exit()

+ 132 - 0
website_clickjobs/bennisclickjob.py

@@ -0,0 +1,132 @@
+import time
+from datetime import datetime
+import json
+from selenium import webdriver
+from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
+import time
+import os
+import urllib.parse
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+import codecs
+import random
+import requests
+import dataset
+import traceback
+import sys
+from selenium.webdriver.common.keys import Keys
+
+target_domain=['bennis.com.tw']
+brands={'bennis.com.tw':'班尼斯'}
+
+driver=None
+headers = {
+        "Authorization": "Bearer " + "t35vhZtWNgvDNWHc3DJh0OKll3mcB9GvC8K2EAkBug2",
+        "Content-Type": "application/x-www-form-urlencoded"
+}
+
+
+
+def send_msg(kw):
+    params = {"message": "處理關鍵字: "+kw}  
+    r = requests.post("https://notify-api.line.me/api/notify",headers=headers, params=params)
+
+
+def empty_query(q):
+    global driver
+    googleurl='https://www.google.com/search?q='+urllib.parse.quote(q)
+    driver.get(googleurl)
+    time.sleep(3)
+
+
+def process_query():
+    q="班尼斯"
+    domain="bennis.com.tw"
+    global driver
+    driver.get('https://www.google.com?num=100')
+    time.sleep(3)
+    print(driver.current_url)
+
+    # elmts=driver.find_elements_by_xpath("//div[@class='yuRUbf']/a")
+    # ABOVE METHOD IS DEPRECATED STARTING SELENIUM 4.3.0, USE THIS
+    #
+    elmt = driver.find_element(By.XPATH, "//input[@name='q']")
+    time.sleep(1)
+
+    elmt.send_keys(q)
+    elmt.send_keys(Keys.ENTER)
+
+    idx=1
+    ranking=-1
+    domain_in_link = 0
+
+    googleurl = driver.current_url
+    print(driver.current_url)
+
+    elmts=driver.find_elements("xpath","//div[@class='yuRUbf']/a")
+
+    print (len(elmts))
+    # driver.save_screenshot('c:/tmp/test.png')
+
+    for el in elmts:
+        href=el.get_attribute('href')
+        txt=el.text
+        if len(txt)>10:
+            if domain in href:
+                domain_in_link += 1
+                print('clicked....')
+                print(href)
+                print(txt)
+                webdriver.ActionChains(driver).move_to_element(el).perform()
+                webdriver.ActionChains(driver).move_to_element(el).click().perform()
+                time.sleep(6)
+
+                if domain in target_domain:
+                    print("Target link found")
+                    time_stamp = datetime.fromtimestamp(time.time())
+                    time_stamp = time_stamp.strftime("%Y-%m-%d %H:%M:%S")
+                    db['click_results'].insert({"time_stamp": time_stamp, "brand": brands[domain], "domain": domain, "query": q, "url": href, "content": txt})
+                break
+
+    if domain in target_domain:
+        print("Target domain found")
+        time_stamp = datetime.fromtimestamp(time.time())
+        time_stamp = time_stamp.strftime("%Y-%m-%d %H:%M:%S")
+        db['query_results'].insert({"time_stamp": time_stamp, "brand": brands[domain], "domain": domain, "query": q, "googleurl": googleurl, "element_count": len(elmts), "domain_in_link_count": domain_in_link})
+            
+
+    print(domain_in_link)
+    
+
+def run_once():
+    global driver
+    result=[]
+    options = webdriver.ChromeOptions()
+    options.add_argument('--headless')
+#    options.add_argument("--user-agent=" +user_agent)
+    options.add_argument("--incognito")
+    options.add_argument('--no-sandbox')
+    options.add_argument('--disable-dev-shm-usage')
+
+    driver = webdriver.Chrome(
+    options=options)
+
+    driver.delete_all_cookies()
+    driver.set_window_size(1400,1000)
+
+    process_query()
+    time.sleep(3)
+    driver.quit()
+
+#execution starts here
+db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/seo?charset=utf8mb4')
+
+while True:
+    try:
+        run_once()
+    except:
+        traceback.print_exc()
+    sleepint=random.randint(35,50)
+    print("Completed (" + str(sleepint) + ")")
+    time.sleep(sleepint)

+ 2 - 2
website_clickjobs/clickjob.py

@@ -116,5 +116,5 @@ while True:
         run_once( (c['term'],c['domain'])   )
     except:
         traceback.print_exc()
-    #sleepint=random.randint(10,15)
-    #time.sleep(sleepint)
+    sleepint=random.randint(20,40)
+    time.sleep(sleepint)