# run4.py
# -*- coding: utf-8 -*-
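"""Google Maps POI scraper.

Pulls pending place URLs out of the `error_list2` MySQL table, drives a
selenium-wire Chrome session through each place page, decodes the
intercepted `place?`, `listentitiesreviews?`, and `photo?` XHR responses,
and inserts the parsed fields into `shop_list3` (skipping fids already
cached). All of the numeric index paths used by the parsers below are
reverse-engineered from Google Maps' internal JSON payloads and can break
whenever Google changes the format.
"""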
#from selenium import webdriver
#from tkinter.tix import TEXT
from seleniumwire import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
import selenium
import traceback
from bs4 import BeautifulSoup
from utility import database_access as DA
from utility.parseutils import *
from utility.connect import *
from datetime import datetime
from requests import session
import pandas as pd
import dataset
import time
import json
import re
import sys, os
import socket
import brotli
import pickle
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import urllib.parse

chrome_window = False
globalkw = None
proxyport = 8787

def write_to_file(jsobj, fname):
    with open(fname, 'wb') as handle:
        pickle.dump(jsobj, handle, protocol=pickle.HIGHEST_PROTOCOL)

def build_cache(db):
    id_dict = {}
    cursor = db.query('SELECT fid FROM google_poi.shop_list3;')
    for c in cursor:
        key = '{}'.format(c['fid'])
        id_dict[key] = 1
    return id_dict

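# The cache is just {fid: 1}; save_js_to_db() consults it so places already
# present in shop_list3 are not inserted twice.
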
#def brower_start(port):
#    global proxyport
#    global chrome_window
#    print(proxyport)
#    options = webdriver.ChromeOptions()
#    if chrome_window:
#        browser = webdriver.Chrome(
#            desired_capabilities=options.to_capabilities()
#        )
#    else:
#        chrome_options = webdriver.ChromeOptions()
#        chrome_options.add_argument('--proxy-server=host.docker.internal:' + str(proxyport))  # Specify your Kubernetes service-name here
#        chrome_options.add_argument('--ignore-certificate-errors')
#        chrome_options.add_argument("--no-sandbox")
#        chrome_options.add_argument("--disable-dev-shm-usage")
#        browser = webdriver.Remote(
#            command_executor='http://127.0.0.1:' + str(port) + '/wd/hub',
#            desired_capabilities=chrome_options.to_capabilities(),
#            seleniumwire_options={'addr': '0.0.0.0', 'port': proxyport, 'auto_config': False}
#        )
#    browser.set_window_size(1400, 1000)
#    return browser

def brower_start(port):
    options = webdriver.ChromeOptions()
    options.add_argument('--ignore-certificate-errors')
    options.add_argument("--no-sandbox")
    options.add_argument("--headless")
    options.add_argument("--disable-gpu")
    options.add_argument("--disable-dev-shm-usage")
    browser = webdriver.Chrome(options=options)
    browser.set_window_size(1400, 1000)
    # browser = webdriver.Remote(
    #     command_executor='http://127.0.0.1:'+str(port)+'/wd/hub',
    #     # command_executor='http://192.53.174.202:'+str(port)+'/wd/hub',
    #     desired_capabilities=options.to_capabilities()
    # )
    return browser

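# Note: this headless variant never uses `port`; it is only consumed by the
# commented-out Remote/docker setups, and the parameter is kept so main() can
# call either version unchanged.
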
def get_next_job(db):
    result = db.query('select * from error_list2 where check_=0 ORDER BY RAND() limit 100')
    url_pd = pd.DataFrame([dict(i) for i in result])
    # fid is the '1s<hex>:<hex>' token inside the /maps URL's data= blob
    url_pd['fid'] = url_pd['item_url'].apply(lambda x: x.split('data=')[1].split('!')[3])
    # url_pd['item_url'] = url_pd['fid'].apply(lambda x: 'https://www.google.com.tw/maps/@24.1753633,120.6747136,15z/data=!4m5!3m4!1s{}!8m2!3d24.1760271!4d120.6705323'.format(x))

    # drop URLs that already failed permanently and landed in error_list3
    remove = db.query('select item_url from error_list3')
    remove = pd.DataFrame([dict(i) for i in remove])
    if len(remove) != 0:
        remove_fid_list = remove['item_url'].to_list()
        url_pd = url_pd[~url_pd['item_url'].isin(remove_fid_list)]
    return url_pd

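# A minimal sketch of the fid extraction above, with a made-up place id:
#   >>> u = 'https://www.google.com.tw/maps/place/data=!4m5!3m4!1s0xabc:0xdef!8m2!3d24.17!4d120.67'
#   >>> u.split('data=')[1].split('!')[3]
#   '1s0xabc:0xdef'
# This token is what the request-matching code later splits on ':' to
# recognize the place's XHR traffic.
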
def parsing_js(resp):
    # the payload carries an anti-JSON prefix (e.g. ")]}'" plus a newline),
    # so skip the first five characters before parsing
    txt = json.loads(resp[5:])
    output = {}
    output['name'] = txt[6][11]
    output['adress_name'] = txt[6][18]  # key spelling kept to match the DB column

    if txt[6][4]:
        if txt[6][4][7]:
            output['rating'] = str(txt[6][4][7])
        else:
            output['rating'] = None
        if txt[6][4][8]:
            output['user_ratings_total'] = str(txt[6][4][8])
        else:
            output['user_ratings_total'] = None
        if txt[6][4][2]:
            # price level = number of '$' signs in the price string
            output['price_level'] = str(txt[6][4][2].count('$'))
        else:
            output['price_level'] = None
    else:
        output['rating'] = None
        output['user_ratings_total'] = None
        output['price_level'] = None

    if txt[6][37][0]:
        output['lon'] = txt[6][37][0][0][8][0][1]
        output['lat'] = txt[6][37][0][0][8][0][2]
    else:
        output['lon'] = None
        output['lat'] = None

    if txt[6][178]:
        output['tel'] = txt[6][178][0][0]
    else:
        output['tel'] = ''

    if txt[6][13]:
        output['category'] = txt[6][13][0]
    else:
        output['category'] = ''

    try:
        location = txt[6][183][2][2][0]
        if location:
            location_s = location.split(' ')
            output['city'], output['area'] = location_s[-1], location_s[-2]
        else:
            output['city'], output['area'] = '', ''
    except:
        output['city'], output['area'] = '', ''

    if txt[6][100]:
        for item in txt[6][100][1]:
            name = item[1]
            if name not in intro_list.keys(): continue
            name_map = intro_list[name]
            c = 0
            detail = []
            for t in item[2]:
                value = t[1]
                if t[3] == 1:
                    # t[3] == 1 marks an unavailable attribute; '不提供' = "not offered"
                    detail += [{'id': c, name_map[1]: '不提供' + str(value)}]
                else:
                    detail += [{'id': c, name_map[1]: value}]
                c += 1
            output[name_map[0]] = str(detail)

    for key in intro_list:
        if intro_list[key][0] not in output.keys():
            output[intro_list[key][0]] = '[]'

    if txt[6][34]:
        output = time_parsing_js(txt[6][34], output)
    else:
        output['open_now'] = 'False'
        output['periods'] = ''
        output['weekday_text'] = ''
        output['time_status'] = ''

    if txt[6][72]:
        output['header_image'] = txt[6][72][0][0][6][0]
    else:
        output['header_image'] = ''

    if txt[6][126]:
        output['google_url'] = txt[6][126][4]
        ludocid_str = [i for i in txt[6][126][4].split('&') if i.find('ludocid') != -1]
        if len(ludocid_str) != 0:
            ludocid = ludocid_str[0].split('=')[-1]
            output['ludocid'] = ludocid
    else:
        output['google_url'] = ''
    # write_to_file(orig,'debug.pickle')
    return output

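# Index map assumed by parsing_js (inferred from the accesses above; Google
# can reshuffle these at any time):
#   txt[6][11]  name              txt[6][18]   address
#   txt[6][4]   rating block ([7] rating, [8] review count, [2] price string)
#   txt[6][37]  coordinates       txt[6][178]  telephone
#   txt[6][13]  categories        txt[6][183]  city/area
#   txt[6][100] attributes        txt[6][34]   opening hours
#   txt[6][72]  header image      txt[6][126]  google url (with ludocid)
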
def time_parsing_js(time_json, output):
    weekday_text = []
    periods = []
    for time_ in time_json[1]:
        week = time_[0]
        weekday_text += ['{}: {}'.format(week, ', '.join(time_[1]))]
        for t in time_[1]:
            if t == '24 小時營業':  # "Open 24 hours"
                periods += [{
                    "open": {
                        "day": week_list[week],
                        "time": '0000'
                    },
                    "close": {
                        "day": week_list[week],
                        "time": ''
                    }
                }]
            elif t == '休息':  # "Closed"
                periods += [{
                    "open": {
                        "day": week_list[week],
                        "time": ''
                    },
                    "close": {
                        "day": week_list[week],
                        "time": ''
                    }
                }]
            else:
                start, end = t.split('–')
                end_hour, end_min = end.split(':')
                start_hour, start_min = start.split(':')
                # compare hours numerically; lexicographic comparison would
                # misorder non-zero-padded hours (e.g. '9' > '21')
                if int(end_hour) < int(start_hour):
                    # closes after midnight, i.e. on the following day
                    end_day = week_list[week] + 1
                else:
                    end_day = week_list[week]
                periods += [{
                    "open": {
                        "day": week_list[week],
                        "time": start.replace(':', '')
                    },
                    "close": {
                        "day": end_day,
                        "time": end.replace(':', '')
                    }
                }]
    output['periods'] = str(periods)
    output['weekday_text'] = str(weekday_text)
    # keep only the status text before the '⋅' separator
    output['time_status'] = blank_check(time_json[4][4].split('⋅')[0])
    # '永久停業' = permanently closed; '暫時關閉' / '暫停營業' = temporarily closed
    if output['time_status'].find('永久停業') != -1 or \
       output['time_status'].find('暫時關閉') != -1 or \
       output['time_status'].find('暫停營業') != -1:
        output['open_now'] = 'False'
    else:
        output['open_now'] = 'True'
    return output

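# Worked example of the conversion above, assuming week_list (from
# utility.parseutils) maps the localized weekday label to an integer day and
# the slot text is '17:00–01:00' (hypothetical):
#   start='17:00', end='01:00'; int('01') < int('17'), so the close rolls to
#   the next day and the emitted period is
#   {'open': {'day': d, 'time': '1700'}, 'close': {'day': d + 1, 'time': '0100'}}
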
def save_js_to_db(jsobj, fid):
    global shop_table
    global iddict
    jsobj['fid'] = fid
    # skip places already present in shop_list3 (see build_cache)
    if iddict.get(fid) is None:
        try:
            shop_table.insert(jsobj)
        except:
            traceback.print_exc()

def process_web_request_start(driver, fid):
    time.sleep(3)
    print("start&**********************")
    for request in driver.requests:
        if request.response:
            # print(request.url)
            if 'place?' in request.url:
                # print('parsing js:')
                front, _ = fid.split(':')
                if request.url.find(front) != -1:
                    print(request.url)
                    resp = brotli.decompress(request.response.body)
                    jstext = resp.decode('utf-8')
                    output = parsing_js(jstext)
                    time.sleep(1)
                    return output
    return 0

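# driver.requests is selenium-wire's capture log of every request the browser
# has made; the response bodies arrive brotli-compressed here, hence the
# explicit brotli.decompress before JSON parsing. Returning 0 is this
# script's "payload not seen yet" sentinel, which the retry loops in main()
# test for.
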
def reviews_parsing_js(resp):
    columns_name = ['id', 'author_page', 'author_name', 'profile_photo_url', 'author_review_count',
                    'created_at', 'text', 'photos', 'rating', 'store_review_time', 'store_review']
    jsobj = json.loads(resp[5:])
    result = []
    for i in range(len(jsobj[2])):
        tmp = []
        tmp += [jsobj[2][i][6], jsobj[2][i][0][0], jsobj[2][i][0][1], jsobj[2][i][0][2], jsobj[2][i][12][1][1]]
        tmp += [jsobj[2][i][1], jsobj[2][i][3]]
        # image
        image = []
        if jsobj[2][i][14]:
            for j in range(len(jsobj[2][i][14])):
                image += [jsobj[2][i][14][j][6][0]]
        tmp += [image]
        # rating
        tmp += [jsobj[2][i][4]]
        # store reply
        if jsobj[2][i][9]:
            tmp += [jsobj[2][i][9][0], jsobj[2][i][9][1]]
        else:
            tmp += ['', '']
        result.append(list(map(lambda x, y: {x: y}, columns_name, tmp)))
    return result

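# Each review is emitted as a list of single-key dicts, e.g.
#   [{'id': ...}, {'author_page': ...}, ..., {'store_review': ...}]
# rather than one dict per review; the caller stringifies the whole structure
# into the `reviews` column, so the shape is kept as-is.
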
def process_web_request_reviews(driver, output, ludocid):
    time.sleep(3)
    print("reviews&**********************")
    for request in driver.requests:
        if request.response:
            # print(request.url)
            if 'listentitiesreviews?' in request.url:
                # print('parsing js:')
                if request.url.find(ludocid) != -1:
                    print(request.url)
                    resp = brotli.decompress(request.response.body)
                    jstext = resp.decode('utf-8')
                    result = reviews_parsing_js(jstext)
                    output['reviews'] = str(result)
                    time.sleep(1)
                    return output
    return 0

def photos_parsing_js(resp):
    def image_url_change_size(url):
        # street-view tiles don't take the size parameter, leave them as-is
        if url.find('streetviewpixels') != -1:
            return url
        else:
            url_split = url.split('=')
            new_url = url_split[0] + '=s600-' + '-'.join(url_split[-1].split('-')[-2:])
            return new_url

    jsobj = json.loads(resp[5:])
    # write_to_file(jsobj,'tmp/debug_{}.pickle'.format(c))
    menu = []
    all_photos = []  # avoids shadowing the builtin all()
    photo_category_map = {}
    for row in jsobj[12][0]:
        photo_category_map[row[0]] = row[2]

    if photo_category_map[jsobj[13][0]] == '全部':    # "All" tab
        for img in jsobj[0][:5]:
            all_photos += [image_url_change_size(img[6][0])]
    elif photo_category_map[jsobj[13][0]] == '菜單':  # "Menu" tab
        for img in jsobj[0][:5]:
            menu += [image_url_change_size(img[6][0])]
    return menu, all_photos

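# Example of image_url_change_size with a made-up photo URL:
#   .../p/AF1Qip...=w408-h306-k-no  ->  .../p/AF1Qip...=s600-k-no
# i.e. the width/height directives are replaced by a fixed s600 size while
# the trailing '-k-no' flags are preserved.
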
def process_web_request_photo(driver, output, fid):
    try:
        driver.find_element(By.CSS_SELECTOR, "button[data-tab-index='0']")
        photo_soup = BeautifulSoup(driver.page_source, 'html.parser')
        tab_dict = {}
        for tab_index in [0, 1, 2]:
            selector = photo_soup.select("button[data-tab-index='{}']".format(tab_index))
            if len(selector) != 0:
                photo_name = selector[0].text
                if photo_name == '菜單':    # "Menu" tab
                    tab_dict[photo_name] = tab_index
                elif photo_name == '全部':  # "All" tab
                    tab_dict[photo_name] = tab_index
    except:
        tab_dict = {}
    print(tab_dict)

    menu_list = []  # defaults, in case tab_dict is empty
    all_list = []
    for tab_ in tab_dict:
        tab_index = tab_dict[tab_]
        print(tab_index)
        wait = WebDriverWait(driver, 60)
        wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "button[data-tab-index='{}']".format(tab_index)))
        )
        element = driver.find_element(By.CSS_SELECTOR, "button[data-tab-index='{}']".format(tab_index))
        ActionChains(driver).move_to_element(element).click(element).perform()
        time.sleep(1)
        print("photo&**********************")
        # driver.requests accumulates across tabs, so rebuild the lists from
        # the full capture log on every pass
        menu_list = []
        all_list = []
        for request in driver.requests:
            if request.response:
                # print(request.url)
                if 'photo?' in request.url:
                    # print('parsing js:')
                    front, _ = fid.split(':')
                    if request.url.find(front) != -1:
                        print(request.url)
                        resp = brotli.decompress(request.response.body)
                        jstext = resp.decode('utf-8')
                        menu, all_photos = photos_parsing_js(jstext)
                        menu_list += menu
                        all_list += all_photos
    output['shop_photo'] = str(all_list[:5])
    output['menu_photo'] = str(menu_list[:5])
    return output

def main():
    global chrome_window
    global store_list_table
    global shop_table
    global proxyport
    global iddict
    localip = socket.gethostbyname(socket.gethostname())
    db = dataset.connect('mysql://choozmo:pAssw0rd@db.ptt.cx:3306/google_poi?charset=utf8mb4')
    store_list_table = db['swire_store_list']
    shop_table = db['shop_list3']
    error_table = db['error_list2']
    iddict = build_cache(db)

    port = 4444
    if len(sys.argv) == 3:
        port = int(sys.argv[1])
        proxyport = int(sys.argv[2])

    if not chrome_window:
        print('restart docker pw{}'.format(port))
        # os.system('sudo docker container restart p'+str(port))
        os.system('sudo docker container restart pw' + str(port))
        time.sleep(10)

    print('driver start...')
    driver = brower_start(port)
    job = get_next_job(db)
    c = 0
    for row, group in job.iterrows():
        try:
            item_url = group['item_url']
            name = group['name']
            num = group['num']
            keyword = group['keyword']
            fid = group['fid']
            if name:
                db_name = name
            else:
                db_name = num
            print(fid, keyword, db_name)
            print(item_url)

            # shop info
            print('parsing shop info....')
            for i in range(5):
                print('shop info try...{}'.format(i))
                driver.get(item_url)
                time.sleep(3)
                wait = WebDriverWait(driver, 10)
                wait.until(
                    EC.element_to_be_clickable((By.ID, 'sb_cb50'))
                )
                element = driver.find_element(By.ID, 'sb_cb50')
                driver.implicitly_wait(10)
                ActionChains(driver).move_to_element(element).click(element).perform()
                time.sleep(3)
                driver.back()
                if driver.current_url == item_url: continue
                print(driver.current_url)
                output = process_web_request_start(driver, fid)
                if output != 0: break

            # reviews
            print('parsing reviews....')
            if not output['user_ratings_total']:
                output['reviews'] = ''
            else:
                for i in range(3):
                    print('reviews try...{}'.format(i))
                    try:
                        wait = WebDriverWait(driver, 30)
                        more_reviews_css = "button[jsaction='pane.rating.moreReviews']"
                        wait.until(
                            EC.element_to_be_clickable((By.CSS_SELECTOR, more_reviews_css))
                        )
                        element = driver.find_element(By.CSS_SELECTOR, more_reviews_css)
                        driver.implicitly_wait(10)
                        ActionChains(driver).move_to_element(element).click(element).perform()
                        time.sleep(0.5)
                        output_ = process_web_request_reviews(driver, output, output['ludocid'])
                        if output_ != 0:
                            output = output_
                            break
                    except:
                        driver.get(item_url)
                        time.sleep(0.5)
            if 'reviews' not in output.keys():
                continue

            # photo
            print('parsing photo....')
            if output['header_image'] != '':
                for i in range(3):
                    print('photo try...{}'.format(i))
                    driver.get(item_url)
                    time.sleep(0.5)
                    print(driver.current_url)
                    try:
                        wait = WebDriverWait(driver, 30)
                        # aria-label is '{name}的相片', i.e. "photos of {name}"
                        wait.until(
                            EC.element_to_be_clickable((By.CSS_SELECTOR, "div[aria-label='{}的相片']".format(output['name'])))
                        )
                        element = driver.find_element(By.CSS_SELECTOR, "div[aria-label='{}的相片']".format(output['name']))
                        ActionChains(driver).move_to_element(element).click(element).perform()
                        output = process_web_request_photo(driver, output, fid)
                        break
                    except:
                        pass
            else:
                output['shop_photo'] = '[]'
                output['menu_photo'] = '[]'

            output['item_url'] = item_url
            output['keyword'] = keyword
            if output['google_url'] == '':
                query_name = output['adress_name'].replace('(', '').replace(')', '').replace(' ', '')
                output['google_url'] = 'https://www.google.com.tw/search?q={}'.format(query_name)
            output['crawler_date'] = datetime.today().strftime("%Y/%m/%d %H:%M")
            print(output)

            save_js_to_db(output, fid)
            error_table.upsert({'item_url': item_url, 'check_': 1}, ['item_url'])
            print('*' * 10)
        except TimeoutException:
            traceback.print_exc()
            break
        except:
            error_table3 = db['error_list3']
            error_table3.insert({'name': name, 'keyword': keyword, 'item_url': item_url,
                                 'crawler_date': datetime.today().strftime("%Y/%m/%d %H:%M")})
            traceback.print_exc()


if __name__ == '__main__':
    main()
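
# Usage (assumed from the argv handling in main()):
#   python run4.py <selenium_port> <proxy_port>   e.g. python run4.py 4444 8787
# With no arguments, port defaults to 4444 and proxyport keeps its
# module-level default of 8787.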