123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177 |
- from enum import Enum
- import time
- from datetime import datetime
- from typing import Optional
- from pydantic import BaseModel
- from fastapi import FastAPI, Query, UploadFile, File
- from fastapi import FastAPI
- from fastapi.middleware.cors import CORSMiddleware
- import json
- import urllib.request
- import dataset,json
- import pandas as pd
- from gspread_pandas import Spread, Client
- from oauth2client.service_account import ServiceAccountCredentials
- import gspread
- from first import first
# --- Application / middleware / database setup (module-level side effects) ---

app = FastAPI()

# Whitelisted origins — currently unused because the middleware below allows "*".
origins = [
    "http://172.105.205.52",
    "http://172.105.205.52:8001",
]

app.add_middleware(
    CORSMiddleware,
    # allow_origins=origins,
    # NOTE(review): wildcard origins combined with allow_credentials=True is
    # rejected by browsers per the CORS spec (credentialed requests may not use
    # "Access-Control-Allow-Origin: *") — consider switching back to `origins`.
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Shared MySQL connection used by the route handlers below.
# NOTE(review): credentials are hardcoded in source — move to environment
# variables / a secrets store before this leaves a private repo.
db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/NFTBoard?charset=utf8mb4')
class subreq(BaseModel):
    """Request body for POST /add_sub: the subscriber's e-mail address."""
    email:str
class ContactUs(BaseModel):
    """Request body for POST /add_contact_us (contact-form submission).

    The four ``coop_*`` flags mark which cooperation modes the sender is
    interested in; ``work_address`` is an optional portfolio/work URL.
    """
    country_code: str
    username: str
    email: str
    phone: str
    # Defaults were the int 0 on bool-typed fields; use real booleans so the
    # declared type and the default agree (pydantic coerced 0 -> False anyway).
    coop_dif: Optional[bool] = False
    coop_ad: Optional[bool] = False
    coop_acting: Optional[bool] = False
    coop_marketing: Optional[bool] = False
    work_address: Optional[str] = ''
@app.get("/showdata/{limit}")
async def showdata(limit):
    """Proxy the CoinMarketCap NFT-collections ranking.

    Fetches up to ``limit`` collections (7-day period) and returns a trimmed
    list of dicts with rounded monetary figures for the frontend.
    """
    # url = 'https://api.coinmarketcap.com/data-api/v3/nft/collections?start=0&limit=' + limit
    url = ('https://api.coinmarketcap.com/data-api/v3/nft/collections'
           f'?start=0&limit={limit}&period=2')
    # Context manager closes the HTTP response; the original leaked the socket.
    with urllib.request.urlopen(url) as resp:
        collections = json.loads(resp.read())['data']['collections']
    reqs = []
    for req in collections:
        # Upstream omits 'website' for some collections; '#' is the UI fallback.
        website = req['website'] if req.get('website') is not None else '#'
        collection_net_worth = round(float(req['netWorth']), 2)
        volume_7d = round(float(req['volume7d']), 2)
        volume_all_time = round(float(req['volumeAT']), 2)
        reqs.append({
            'rank': req['rank'],
            'slug': req['slug'],
            'logo': req['logo'],
            'website': website,
            'name': req['name'],
            'assets': req['assets'],
            'collection_net_worth': collection_net_worth,
            'volume_7d': volume_7d,
            'sales_7d': req['sales7d'],
            'volume_all_time': volume_all_time,
            'sales_all_time': req['salesAT'],
            'owners': req['owners'],
        })
    return reqs
@app.post("/add_sub")
async def add_subclient_info(req: subreq):
    """Subscribe an e-mail address.

    Inserts into the Subscribe table when the address is new, then refreshes
    the Google Sheet mirror. Returns ``{'msg': 0}`` on success, ``{'msg': 1}``
    when the e-mail already exists.
    """
    code = 1
    sub_table = db['Subscribe']
    time_stamp = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S")
    # Bound parameter instead of string concatenation — the original was
    # vulnerable to SQL injection through the submitted e-mail address.
    statement = 'SELECT COUNT(1) FROM Subscribe WHERE email = :email'
    if first(db.query(statement, email=req.email))['COUNT(1)'] == 0:
        sub_table.insert({'email': req.email, 'timestamp': time_stamp})
        make_ci_df()
        code = 0
    return {'msg':code}#0 means succss added, 1 means email is duplicated
@app.post('/add_contact_us')
async def add_contact_us_client_info(req: ContactUs):
    """Store a Contact-Us submission in the DB, then mirror the whole table
    to the Google Sheet.

    Always returns ``{'msg': 0}``: the duplicate-e-mail check that would
    return 1 is currently disabled for testing (see NOTE below).
    """
    # NOTE(review): an e-mail-dedup guard (like /add_sub's) was removed here
    # "for testing" — every submission is inserted unconditionally. Restore a
    # parameterized COUNT(1) check before production if duplicates matter.
    code = 1
    table = db['ContactUs']
    time_stamp = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S")
    table.insert(
        {
            'country_code':req.country_code,
            'username': req.username,
            'email': req.email,
            'phone': req.phone,
            'coop_dif': req.coop_dif,
            'coop_ad': req.coop_ad,
            'coop_acting': req.coop_acting,
            'coop_marketing': req.coop_marketing,
            'work_address': req.work_address,
            'timestamp': time_stamp
        }
    )
    code = 0
    # Refresh the Google Sheet mirror of the ContactUs table.
    make_contact_us_df()
    return {'msg':code}#0 means succss added, 1 means email is duplicated
@app.get("/get_sub_list")
async def add_subclient_info():
    """Return every Subscribe row as ``{'id', 'email', 'timeStamp'}`` dicts."""
    # NOTE(review): this function name shadows the POST /add_sub handler above.
    # FastAPI registers both routes at decoration time so routing still works,
    # but the module attribute now points here — consider renaming.
    rows = db.query('SELECT * FROM Subscribe')
    return [
        {'id': row['id'], 'email': row['email'], 'timeStamp': row['timestamp']}
        for row in rows
    ]
def make_ci_df():
    """Rebuild the 'subscribe_rec' Google Sheet from the Subscribe table,
    newest first."""
    # Fresh connection: this may run outside the request lifecycle.
    db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/NFTBoard?charset=utf8mb4')
    statement = 'SELECT * FROM Subscribe ORDER BY timestamp DESC'
    # Collect rows first, then build the frame once: DataFrame.append was
    # deprecated and removed in pandas 2.0, and per-row append was O(n^2).
    # (Also dropped an unused `date_format` local that had a buggy %Y-%M-%d.)
    rows = []
    for row in db.query(statement):
        # timestamp column comes back as a datetime; format for the sheet.
        fdate = row['timestamp'].strftime('%Y-%m-%d %H時 %M分')
        rows.append({'serial id': row['id'], 'e-mail': row['email'], '登記時間': fdate})
    # Explicit columns keep the sheet layout stable even when the table is empty.
    df = pd.DataFrame(rows, columns=['serial id', 'e-mail', '登記時間'])
    save_sheet(df, 'subscribe_rec', 'Sheet1')
def make_contact_us_df():
    """Rebuild the Contact-Us Google Sheet from the ContactUs table,
    newest first."""
    # Fresh connection: this may run outside the request lifecycle.
    db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/NFTBoard?charset=utf8mb4')
    statement = 'SELECT * FROM ContactUs ORDER BY timestamp DESC'
    # Build the frame once from a list of dicts: DataFrame.append was removed
    # in pandas 2.0 and per-row append was quadratic.
    rows = [
        {
            'id': row['id'],
            '國家區域': row['country_code'],
            '姓名': row['username'],
            '聯絡Email': row['email'],
            '聯絡手機': row['phone'],
            '合作方式_異業合作': row['coop_dif'],
            '合作方式_廣告刊登': row['coop_ad'],
            '合作方式_代理發行': row['coop_acting'],
            '合作方式_知識產權行銷': row['coop_marketing'],
            '作品連結': row['work_address'],
            '申請時間': row['timestamp'],
        }
        for row in db.query(statement)
    ]
    # Explicit columns keep the sheet layout stable even when the table is empty.
    df = pd.DataFrame(rows, columns=['id', '國家區域', '姓名', '聯絡Email', '聯絡手機', '合作方式_異業合作', '合作方式_廣告刊登', '合作方式_代理發行', '合作方式_知識產權行銷', '作品連結', '申請時間'])
    save_sheet(df, '聯絡NFTBoard (回應)', 'Sheet1', json_name='cred.json')
def save_sheet(df, filename, tabname, startpos='A1', json_name='spread2.json'):
    """Write ``df`` to the Google Sheet ``filename`` on tab ``tabname``.

    ``startpos`` is the top-left cell; ``json_name`` is the service-account
    key file. Existing sheet contents are replaced; the index is not written.
    """
    scope = ['https://spreadsheets.google.com/feeds',
             'https://www.googleapis.com/auth/drive']
    credentials = ServiceAccountCredentials.from_json_keyfile_name(json_name, scope)
    # (Dropped the unused `gc = gspread.authorize(credentials)` — Spread takes
    # the credentials directly, so that call was a dead network round-trip.)
    spread = Spread(filename, creds=credentials)
    spread.df_to_sheet(df, sheet=tabname, start=startpos, replace=True, index=False)
|