Jason 2 роки тому
коміт
4972345910
2 змінених файлів з 403 додано та 0 видалено
  1. 2 0
      .gitignore
  2. 401 0
      main.py

+ 2 - 0
.gitignore

@@ -0,0 +1,2 @@
+*.json
+*.xlsx

+ 401 - 0
main.py

@@ -0,0 +1,401 @@
from __future__ import print_function

# Standard library
import datetime as dt
import math
import os.path

# Third-party
import google.auth
import gspread
import pandas as pd
import pymysql
import requests
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from oauth2client.service_account import ServiceAccountCredentials

# Make libraries that expect the MySQLdb module use pymysql instead.
pymysql.install_as_MySQLdb()

# SETTINGS, FOR DEBUG USE ONLY -- SET TO 0 IF UNUSED, ANY OTHER VALUE WILL TRIGGER THE FUNCTION
makepres = 0  # generates presentation files, files will be under service/報告 folder
notifygroup = 0  # notify to LINE group

# If modifying these scopes, delete the file token.json.
GOOGLE_APPLICATION_CREDENTIALS = "choozmomain-6c1dffe1ee7e.json"
SCOPES = ['https://www.googleapis.com/auth/drive']
+
# --- OAuth user-credential bootstrap (runs at import time) -------------------
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
    creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
    if creds and creds.expired and creds.refresh_token:
        # Expired but refreshable: refresh silently without user interaction.
        creds.refresh(Request())
    else:
        # First run (or unrefreshable token): launch the browser consent flow.
        flow = InstalledAppFlow.from_client_secrets_file(
            'credentials.json', SCOPES)
        creds = flow.run_local_server(port=0)
    # Save the credentials for the next run
    with open('token.json', 'w') as token:
        token.write(creds.to_json())

# --- Client roster: read the master spreadsheet's first worksheet ------------
gc = gspread.authorize(creds)
sheet = gc.open_by_key('1zpE8NpF7wzGppLlx95V785rnQotapzP-8fZSFOP8Ouk')
sheet_instance = sheet.get_worksheet(0)
records_data = sheet_instance.get_all_records()

# Each entry: [client name, list of comma-separated aliases, per-client
# Google Sheets ID, discontinued flag (1 = skip client in main())].
clients = []
for rd in records_data:
    clients.append([rd["client"], rd["alias"].split(','), rd['sheetsid'], rd['discontinued']])
print(clients) # from left to right: client name, client alias, sheets ID, discontinued flag

# The ID of the base presentation.
PRESENTATION_ID = '1dLmTtHF0ZmrXkEDLXOcXaH7NgL6qYZtELVHMGT1Qe7E'
+
def notify_group(msg):
    """Push *msg* to the LINE group via the LINE Notify API (best effort).

    The response is intentionally ignored, matching the original
    fire-and-forget behaviour.
    """
    # NOTE(review): hard-coded access token -- should move to config/env var.
    gid = 'WekCRfnAirSiSxALiD6gcm0B56EejsoK89zFbIaiZQD'
    headers = {"Authorization": "Bearer " + gid, "Content-Type": "application/x-www-form-urlencoded"}
    # timeout added so a hung LINE endpoint cannot stall the whole report run
    requests.post("https://notify-api.line.me/api/notify", headers=headers,
                  params={"message": msg}, timeout=10)
+
def copy_presentation(presentation_id, copy_title):
    """Copy the Drive file *presentation_id* under the name *copy_title*.

    Uses the module-level ``creds``. Returns the new file's ID on success;
    on failure prints a message and returns the ``HttpError`` instance
    (preserving the original error-as-return-value behaviour).
    """
    # pylint: disable=maybe-no-member
    try:
        drive = build('drive', 'v3', credentials=creds)
        response = drive.files().copy(
            fileId=presentation_id,
            body={'name': copy_title},
        ).execute()
    except HttpError as error:
        print(f"An error occurred: {error}")
        print("Presentations  not copied")
        return error

    return response.get('id')
+
def totaling(data):
    """Sum each column of *data* (a list of rows of cell strings).

    Rows are transposed into columns; a column whose cells all parse as
    floats is reduced to its sum, any non-numeric column yields the string
    "NA". The result list is printed (debug) and returned.
    """
    columns = list(zip(*data))
    res = []
    for col in columns:
        try:
            # ValueError: non-numeric cell text; TypeError: None/odd cell types
            res.append(sum(float(cell) for cell in col))
        except (ValueError, TypeError):
            res.append("NA")  # some columns are text; those are unused downstream
    print(res)
    return res
+
def process_worksheet(ws, client, n):
    """Build a one-row DataFrame from worksheet *ws*.

    Row 13 of the sheet supplies the column headers; the column totals of
    every row below it become the single data row. The frame is also dumped
    to an .xlsx file (debug aid) and printed before being returned.
    """
    rows = ws.get_all_values()
    header = rows[12]
    totals = totaling(list(rows[13:]))
    frame = pd.DataFrame([header, totals])
    frame.columns = frame.iloc[0]  # promote first row to header
    frame = frame[1:]

    frame.to_excel(client + str(n) + " f.xlsx")
    print(frame)
    return frame
+
def getValue(before, after):
    """Format the change from *before* to *after* as an arrow-prefixed string.

    Examples: +2 -> "↑ 2", -2.5 -> "↓ 2.5", no change -> "→ 0". Numbers are
    thousands-separated with trailing zeros (and a dangling dot) trimmed.
    """
    def _fmt(v):
        # "{:,.2f}" always contains a '.', so trim trailing zeros then the dot
        s = "{:,.2f}".format(v)
        return s.rstrip('0').rstrip('.') if '.' in s else s

    delta = after - before
    if delta > 0:
        return "↑ " + _fmt(delta)
    if delta == 0:
        return "→ 0"
    return "↓ " + _fmt(abs(delta))
+
def checkFloat(x):
    """Round *x* to an integer when its fractional part is ~0.99 or ~0.01.

    Used to clean up counts (clicks/impressions/conversions) that came out of
    the sheet as e.g. 5.99 or 6.01. The original exact float comparison
    ``x % 1 == 0.99`` almost never matched (``5.99 % 1`` is
    0.99000000000000021), so a tolerance comparison is used instead.
    """
    print(x)  # debug trace, kept from the original
    frac = x % 1
    if math.isclose(frac, 0.99, abs_tol=1e-9) or math.isclose(frac, 0.01, abs_tol=1e-9):
        return round(x)
    return x
+
def gather(client, source):
    """Collect last week's Google Ads numbers for *client* from the Google
    Sheet with ID *source* and return a dict that maps Slides placeholder
    tags (e.g. "{{v111}}") to display-ready values.

    Two worksheets are read: "<client> 前一週D" (display, -> ``file``) and
    "<client> 前一週S" (search, -> ``file2``). For each metric the dict holds
    the previous value, current value, the D+S total, and an arrow-formatted
    delta string from getValue().
    """
    data = {}
    
    #ads data here
    
    # Column headers as exported by Google Ads; index layout for both lists:
    #   0-5  = previous period ("Compare to"): clicks, impr, CPC, CPM, conv, cost
    #   6-11 = current period:                 clicks, impr, CPC, CPM, conv, cost
    fields1 = ["Clicks (Compare to)", "Impr. (Compare to)", "Avg. CPC (Compare to)", "Avg. CPM (Compare to)", "Conversions (Compare to)", "Cost (Compare to)", "Clicks", "Impr.", "Avg. CPC", "Avg. CPM", "Conversions", "Cost", ]
    # NOTE(review): entries 2/3 and 8/9 below are both "平均單次點擊出價" (avg CPC);
    # the CPM slots appear to repeat the CPC header -- confirm against the sheet.
    fields2 = ["點擊 (比較對象)", "曝光 (比較對象)", "平均單次點擊出價 (比較對象)", "平均單次點擊出價 (比較對象)", "所有轉換 (比較對象)", "費用 (比較對象)","點擊", "曝光", "平均單次點擊出價", "平均單次點擊出價", "所有轉換", "費用"]
    
    # Service-account auth (separate from the user OAuth creds at module level).
    credentials = ServiceAccountCredentials.from_json_keyfile_name(GOOGLE_APPLICATION_CREDENTIALS, SCOPES)
    gc = gspread.authorize(credentials)

    sh = gc.open_by_key(source)

    msg = ""

    file = process_worksheet(sh.worksheet(client + ' 前一週D'), client, 1)
    file2 = process_worksheet(sh.worksheet(client + ' 前一週S'), client, 2)

    # quick testing to determine which field names to use -- in English (fields1) or Chinese (fields2)
    # NOTE(review): the bare except also hides unrelated errors; it falls back
    # to the Chinese headers whenever the English lookup fails for any reason.
    try:
        print(file[fields1[0]].values[0])
        fields = fields1
    except:
        fields = fields2
    
    for f in fields:  # if NA then it is assumed to be 0 -- we still need to calculate the delta
        if file[f].values[0] == "NA":
            val1 = 0.00
            file[f].values[0] = 0.00
        else:
            val1 = file[f].values[0]

        if file2[f].values[0] == "NA":
            val2 = 0.00
            file2[f].values[0] = 0.00
        else:
            val2 = file2[f].values[0]

        msg += f + ": " + str(val1) + " / " + str(val2) + "\n"
    
    # well, you cant have decimals in clicks or impressions or conversions
    # (indices 0/6 = clicks, 1/7 = impressions, 4/10 = conversions; applied to
    # both the previous and current period in both worksheets, in place)
    file[fields[0]].values[0] = checkFloat(file[fields[0]].values[0])
    file[fields[6]].values[0] = checkFloat(file[fields[6]].values[0])
    file2[fields[0]].values[0] = checkFloat(file2[fields[0]].values[0])
    file2[fields[6]].values[0] = checkFloat(file2[fields[6]].values[0])
    file[fields[1]].values[0] = checkFloat(file[fields[1]].values[0])
    file[fields[7]].values[0] = checkFloat(file[fields[7]].values[0])
    file2[fields[1]].values[0] = checkFloat(file2[fields[1]].values[0])
    file2[fields[7]].values[0] = checkFloat(file2[fields[7]].values[0])
    file[fields[4]].values[0] = checkFloat(file[fields[4]].values[0])
    file[fields[10]].values[0] = checkFloat(file[fields[10]].values[0])
    file2[fields[4]].values[0] = checkFloat(file2[fields[4]].values[0])
    file2[fields[10]].values[0] = checkFloat(file2[fields[10]].values[0])

    # Placeholder naming: {{v<metric><period><sheet>}} where metric is 1-6
    # (clicks/impr/CPC/CPM/conv/cost), period 1=previous 2=current, and the
    # trailing digit 1=display sheet, 2=search sheet, 3=D+S total.
    data["{{v111}}"] = file[fields[0]].values[0]
    data["{{v211}}"] = file[fields[1]].values[0]
    data["{{v311}}"] = file[fields[2]].values[0]
    data["{{v411}}"] = file[fields[3]].values[0]
    data["{{v511}}"] = file[fields[4]].values[0]
    data["{{v611}}"] = file[fields[5]].values[0]
    data["{{v121}}"] = file[fields[6]].values[0]
    data["{{v221}}"] = file[fields[7]].values[0]
    data["{{v321}}"] = file[fields[8]].values[0]
    data["{{v421}}"] = file[fields[9]].values[0]
    data["{{v521}}"] = file[fields[10]].values[0]
    data["{{v621}}"] = file[fields[11]].values[0]

    data["{{v112}}"] = file2[fields[0]].values[0]
    data["{{v212}}"] = file2[fields[1]].values[0]
    data["{{v312}}"] = file2[fields[2]].values[0]
    data["{{v412}}"] = file2[fields[3]].values[0]
    data["{{v512}}"] = file2[fields[4]].values[0]
    data["{{v612}}"] = file2[fields[5]].values[0]
    data["{{v122}}"] = file2[fields[6]].values[0]
    data["{{v222}}"] = file2[fields[7]].values[0]
    data["{{v322}}"] = file2[fields[8]].values[0]
    data["{{v422}}"] = file2[fields[9]].values[0]
    data["{{v522}}"] = file2[fields[10]].values[0]
    data["{{v622}}"] = file2[fields[11]].values[0]
    
    # D+S totals. NOTE(review): summing CPC/CPM/cost of both networks gives a
    # sum of averages for the rate metrics, not a blended average.
    data["{{v113}}"] = float(file[fields[0]].values[0]) + float(file2[fields[0]].values[0])
    data["{{v213}}"] = float(file[fields[1]].values[0]) + float(file2[fields[1]].values[0])
    data["{{v313}}"] = float(file[fields[2]].values[0]) + float(file2[fields[2]].values[0])
    data["{{v413}}"] = float(file[fields[3]].values[0]) + float(file2[fields[3]].values[0])
    data["{{v513}}"] = float(file[fields[4]].values[0]) + float(file2[fields[4]].values[0])
    data["{{v613}}"] = float(file[fields[5]].values[0]) + float(file2[fields[5]].values[0])
    data["{{v123}}"] = float(file[fields[6]].values[0]) + float(file2[fields[6]].values[0])
    data["{{v223}}"] = float(file[fields[7]].values[0]) + float(file2[fields[7]].values[0])
    data["{{v323}}"] = float(file[fields[8]].values[0]) + float(file2[fields[8]].values[0])
    data["{{v423}}"] = float(file[fields[9]].values[0]) + float(file2[fields[9]].values[0])
    data["{{v523}}"] = float(file[fields[10]].values[0]) + float(file2[fields[10]].values[0])
    data["{{v623}}"] = float(file[fields[11]].values[0]) + float(file2[fields[11]].values[0])

    # Arrow-formatted deltas, previous -> current: {{<metric>d<sheet>}}.
    data["{{1d1}}"] = getValue(file[fields[0]].values[0],file[fields[6]].values[0]) 
    data["{{2d1}}"] = getValue(file[fields[1]].values[0],file[fields[7]].values[0])
    data["{{3d1}}"] = getValue(file[fields[2]].values[0],file[fields[8]].values[0])
    data["{{4d1}}"] = getValue(file[fields[3]].values[0],file[fields[9]].values[0])
    data["{{5d1}}"] = getValue(file[fields[4]].values[0],file[fields[10]].values[0])
    
    data["{{1d2}}"] = getValue(file2[fields[0]].values[0],file2[fields[6]].values[0]) 
    data["{{2d2}}"] = getValue(file2[fields[1]].values[0],file2[fields[7]].values[0])
    data["{{3d2}}"] = getValue(file2[fields[2]].values[0],file2[fields[8]].values[0])
    data["{{4d2}}"] = getValue(file2[fields[3]].values[0],file2[fields[9]].values[0])
    data["{{5d2}}"] = getValue(file2[fields[4]].values[0],file2[fields[10]].values[0])
    
    data["{{1d3}}"] = getValue(file[fields[0]].values[0] + file2[fields[0]].values[0],file[fields[6]].values[0] + file2[fields[6]].values[0]) 
    data["{{2d3}}"] = getValue(file[fields[1]].values[0] + file2[fields[1]].values[0],file[fields[7]].values[0] + file2[fields[7]].values[0])
    data["{{3d3}}"] = getValue(file[fields[2]].values[0] + file2[fields[2]].values[0],file[fields[8]].values[0] + file2[fields[8]].values[0])
    data["{{4d3}}"] = getValue(file[fields[3]].values[0] + file2[fields[3]].values[0],file[fields[9]].values[0] + file2[fields[9]].values[0])
    data["{{5d3}}"] = getValue(file[fields[4]].values[0] + file2[fields[4]].values[0],file[fields[10]].values[0] + file2[fields[10]].values[0])
    
    # Pretty-print every numeric value: thousands separators, two decimals,
    # trailing zeros trimmed. Non-numeric values (delta strings, "NA") raise
    # in format() and are deliberately left untouched.
    for i in data:
        try:
            value = "{:,.2f}".format(data[i])
            data[i] = value.rstrip('0').rstrip('.') if '.' in value else value
        except:
            pass

    return data
+
def check(client, data):
    """Return True when *client* matches the record's name exactly or,
    uppercased, one of the record's aliases (``data[1]``)."""
    name, aliases = data[0], data[1]
    if client == name:
        return True
    return any(client.upper() == alias for alias in aliases)
+
def getClientData(): # currently unused
    """Interactively ask for a client name; return (name, sheets_id).

    Known clients resolve via the roster; unknown ones prompt for the
    Google Sheets ID of their Ads data source.
    """
    name = input("請輸入客戶名稱: ")

    known = next((c for c in clients if check(name, c)), None)
    if known is not None:
        return known[0], known[2]

    sheet_id = input('請輸入Google Ads 資料來源的Google Sheets ID (應有 "[客戶名稱] 前一周" 的活頁簿 : ')
    return name, sheet_id
+
def process(c, service):
    """Produce the weekly report for one roster record *c*.

    *c* is [name, aliases, sheets_id, discontinued]. Gathers the Ads numbers
    and, when the module-level ``makepres`` debug flag is non-zero, copies
    the template deck and substitutes every placeholder via the Slides API.
    Returns the presentation URL (a dummy ID when generation is off).
    """
    client, DATA_ID = c[0], c[2]
    print(client, DATA_ID)

    # GATHER DATA HERE
    data = gather(client, DATA_ID)

    print(data)

    new_pres = "0000000000"  # dummy ID used while presentation generation is off

    if makepres != 0:
        new_pres = copy_presentation(PRESENTATION_ID, client + "週報" + str(dt.date.today().strftime("%Y%m%d")))

        # Call the Slides API
        presentation = service.presentations().get(
            presentationId=new_pres).execute()
        slides = presentation.get('slides')

        print('The presentation contains {} slides:'.format(len(slides)))
        for idx, slide in enumerate(slides):
            elements = slide.get('pageElements')
            print('- Slide #{} contains {} elements.'.format(
                idx + 1, len(elements)))
            print(elements[0]['objectId'])

        def _repl(tag, text):
            # One replaceAllText request per placeholder tag.
            return {'replaceAllText': {'containsText': {'text': tag}, 'replaceText': text}}

        # Header placeholders, then every {{vXYZ}}/{{NdM}} tag that gather()
        # produced -- data holds exactly the template's placeholder set.
        reqs = [
            _repl('{{client}}', client),
            _repl('{{reportdate}}', str(dt.date.today().strftime("%Y/%m/%d"))),
            _repl('{{startdate}}', str(((dt.date.today()) - dt.timedelta(days=7)).strftime("%m/%d"))),
            _repl('{{enddate}}', str(((dt.date.today()) - dt.timedelta(days=1)).strftime("%m/%d"))),
        ]
        reqs.extend(_repl(tag, value) for tag, value in data.items())

        service.presentations().batchUpdate(body={'requests':reqs}, presentationId=new_pres, fields='').execute()

    return "https://docs.google.com/presentation/d/" + new_pres # returns link of presentation
+
+
def main():
    """Run the weekly Ads report for every active client, print the summary
    message, and optionally push it to the LINE group."""
    msg = "以下是客戶的Ads週報,相關負責人員請完成填寫報告內容,謝謝。\n\n"

    try:
        service = build('slides', 'v1', credentials=creds)

        for record in clients:
            if record[3] == 1:  # discontinued client -- skip
                continue
            link = process(record, service)
            msg += record[0] + ": " + link + "\n"

        print(msg)

        if notifygroup != 0:
            notify_group(msg)

    except HttpError as err:
        print(err)


if __name__ == '__main__':
    main()