openshot_video_generator.py 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141
  1. from os import listdir
  2. from os.path import isfile, isdir, join
  3. import openshot
  4. import threading
  5. import zhtts
  6. import os
  7. import urllib
  8. from typing import List
  9. import requests
  10. from pydantic import BaseModel
  11. from bs4 import BeautifulSoup
  12. from PIL import Image,ImageDraw,ImageFont
  13. import pyttsx3
  14. import rpyc
  15. import random
  16. import re
  17. import time
  18. import math
  19. import dataset
  20. from datetime import datetime
  21. from gtts import gTTS
  22. import ffmpy
  23. from difflib import SequenceMatcher
  24. import difflib
  25. from autosub import DEFAULT_CONCURRENCY
  26. from autosub import DEFAULT_SUBTITLE_FORMAT
  27. from pytranscriber.control.ctr_main import Ctr_Main
  28. from pytranscriber.control.ctr_autosub import Ctr_Autosub
  29. import multiprocessing
  30. from itertools import groupby
  31. from operator import itemgetter
  32. from openUtil.parser import parser
  33. import pandas as pd
  34. import numpy as np
  35. import jieba
  36. import jieba.posseg as pseg
  37. import urllib.request
  38. import librosa
  39. from pydub import AudioSegment
  40. from pydub.silence import split_on_silence
  41. import itertools
  42. from hakkaUtil import *
  43. dir_sound = 'mp3_track/'
  44. dir_photo = 'photo/'
  45. dir_text = 'text_file/'
  46. dir_video = 'video_material/'
  47. dir_title = 'title/'
  48. dir_subtitle = 'subtitle/'
  49. dir_anchor = 'anchor_raw/'
  50. tmp_video_dir = 'tmp_video/'
  51. video_sub_folder = 'ai_anchor_video/'
  52. dir_list = [dir_sound,dir_photo,dir_text,dir_video,dir_title,dir_subtitle,dir_anchor,tmp_video_dir]
  53. def notify_group(msg):
  54. glist=['7vilzohcyQMPLfAMRloUawiTV4vtusZhxv8Czo7AJX8','WekCRfnAirSiSxALiD6gcm0B56EejsoK89zFbIaiZQD','1dbtJHbWVbrooXmQqc4r8OyRWDryjD4TMJ6DiDsdgsX','HOB1kVNgIb81tTB4Ort1BfhVp9GFo6NlToMQg88vEhh']
  55. for gid in glist:
  56. headers = {
  57. "Authorization": "Bearer " + gid,
  58. "Content-Type": "application/x-www-form-urlencoded"
  59. }
  60. params = {"message": msg}
  61. r = requests.post("https://notify-api.line.me/api/notify",headers=headers, params=params)
  62. def cKey(r,g,b,fuzz):
  63. col=openshot.Color()
  64. col.red=openshot.Keyframe(r)
  65. col.green=openshot.Keyframe(g)
  66. col.blue=openshot.Keyframe(b)
  67. return openshot.ChromaKey(col, openshot.Keyframe(fuzz))
  68. def video_photo_clip(vid=None,layer=None, position=None, end=None
  69. ,scale_x=1,scale_y=1,location_x=0,location_y=0,ck=None,audio=True):
  70. clip = openshot.Clip(vid)
  71. clip.Layer(layer)
  72. clip.Position(position)
  73. clip.End(end)
  74. clip.scale_x=openshot.Keyframe(scale_x)
  75. clip.scale_y=openshot.Keyframe(scale_y)
  76. clip.location_x=openshot.Keyframe(location_x)
  77. clip.location_y=openshot.Keyframe(location_y)
  78. if ck!=None:
  79. clip.AddEffect(ck)
  80. if audio==True:
  81. clip.has_audio=openshot.Keyframe(1)
  82. else:
  83. clip.has_audio=openshot.Keyframe(0)
  84. return clip
  85. def listener_progress(string, percent):
  86. True
  87. def myunichchar(unicode_char):
  88. mb_string = unicode_char.encode('big5')
  89. try:
  90. unicode_char = unichr(ord(mb_string[0]) << 8 | ord(mb_string[1]))
  91. except NameError:
  92. unicode_char = chr(mb_string[0] << 8 | mb_string[1])
  93. return unicode_char
  94. def get_url_type(url):
  95. print('---------------------------------------------')
  96. print(url)
  97. req = urllib.request.Request(url, method='HEAD', headers={'User-Agent': 'Mozilla/5.0'})
  98. r = urllib.request.urlopen(req)
  99. contentType = r.getheader('Content-Type')
  100. print(contentType)
  101. print('-------------------------------------------------')
  102. return contentType
  103. def make_dir(name_hash):
  104. for direct in dir_list:
  105. if not os.path.isdir(direct):
  106. os.mkdir(direct)
  107. try:
  108. os.mkdir(dir_photo+name_hash)
  109. except FileExistsError:
  110. print("~~~~~~Warning~~~~~~~~~Directory " , dir_photo+name_hash , " already exists")
  111. try:
  112. os.mkdir(dir_text+name_hash)
  113. except FileExistsError:
  114. print("~~~~~~Warning~~~~~~~~~Directory " , dir_text+name_hash , " already exists")
  115. try:
  116. os.mkdir(dir_sound+name_hash)
  117. except FileExistsError:
  118. print("~~~~~~Warning~~~~~~~~~Directory " , dir_sound+name_hash , " already exists")
  119. try:
  120. os.mkdir(dir_anchor+name_hash)
  121. except FileExistsError:
  122. print("~~~~~~Warning~~~~~~~~~Directory " , dir_anchor+name_hash , " already exists")
  123. try:
  124. os.mkdir(dir_subtitle+name_hash)
  125. except FileExistsError:
  126. print("~~~~~~Warning~~~~~~~~~Directory " , dir_subtitle+name_hash , " already exists")
def hakkaTTS(mp3_path,ch_sentence,gender):
    """Synthesize the Chinese sentence *ch_sentence* as Hakka speech into *mp3_path*.

    gender selects the voice (callers pass 0 or 1).  All helpers
    (import_hakka_100, import_data, gen_hakka_tts, ...) come from
    ``from hakkaUtil import *``.
    """
    download = False  # set to True only when the pronunciation MP3s must be downloaded first
    hakka_100 = import_hakka_100()
    word_data,multi_sound = import_data()
    if download:
        download_mp3(word_data,multi_sound)
        download_hakka_100(hakka_100)
    # Register the known vocabulary with jieba so segmentation matches the
    # available recordings.
    ch_word_list = list(itertools.chain(*word_data['華語詞義集'].tolist())) + hakka_100.chinese_clean.tolist()
    import_jieba_userdict(ch_word_list=ch_word_list, userDict_path='userDict.txt')
    gen_hakka_tts(word_data,multi_sound,hakka_100,ch_sentence,gender,mp3_path)
  137. def file_prepare(name, name_hash,text_content,image_urls,multiLang,lang='zh'):
  138. make_dir(name_hash)
  139. img_num = 1
  140. for imgu in image_urls:
  141. print(imgu)
  142. if get_url_type(imgu) =='video/mp4':
  143. r=requests.get(imgu)
  144. f=open(dir_photo+name_hash+"/"+str(img_num)+".mp4",'wb')
  145. for chunk in r.iter_content(chunk_size=255):
  146. if chunk:
  147. f.write(chunk)
  148. f.close()
  149. else:
  150. im = Image.open(requests.get(imgu, stream=True).raw)
  151. im= im.convert("RGB")
  152. im.save(dir_photo+name_hash+"/"+str(img_num)+".jpg")
  153. img_num+=1
  154. #save text
  155. txt_idx=0
  156. for txt in text_content:
  157. text_file = open(dir_text+name_hash+"/"+str(txt_idx)+".txt", "w")
  158. text_file.write(txt)
  159. text_file.close()
  160. txt_idx+=1
  161. print("text file made")
  162. #make mp3
  163. txt_idx = 0
  164. for txt in text_content:
  165. if multiLang==3:
  166. hakkaTTS(dir_sound+name_hash+"/"+str(txt_idx)+".mp3",txt,0)
  167. elif multiLang==4:
  168. hakkaTTS(dir_sound+name_hash+"/"+str(txt_idx)+".mp3",txt,1)
  169. else:
  170. if lang!='zh' or multiLang==1:
  171. if lang!='zh':
  172. tts = gTTS(txt)
  173. tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
  174. else:
  175. tts = gTTS(txt,lang='zh-tw')
  176. tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
  177. #speed up
  178. ff = ffmpy.FFmpeg(inputs={dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3": None}
  179. , outputs={dir_sound+name_hash+"/"+str(txt_idx)+".mp3": ["-filter:a", "atempo=1.2"]})
  180. ff.run()
  181. os.remove(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
  182. else:
  183. print('use zhtts')
  184. tts = zhtts.TTS()
  185. tts.text2wav(txt,dir_sound+name_hash+"/"+str(txt_idx)+".mp3")
  186. txt_idx+=1
  187. print("mp3 file made")
  188. #make title as image
  189. txt2image_title(name, dir_title+name_hash+".png",lang)
  190. def file_prepare_long(name, name_hash,text_content,image_urls,multiLang,lang='zh'):
  191. make_dir(name_hash)
  192. img_num = 1
  193. for imgu in image_urls:
  194. if get_url_type(imgu) =='video/mp4':
  195. r=requests.get(imgu)
  196. f=open(dir_photo+name_hash+"/"+str(img_num)+".mp4",'wb')
  197. for chunk in r.iter_content(chunk_size=255):
  198. if chunk:
  199. f.write(chunk)
  200. f.close()
  201. else:
  202. im = Image.open(requests.get(imgu, stream=True).raw)
  203. im= im.convert("RGB")
  204. im.save(dir_photo+name_hash+"/"+str(img_num)+".jpg")
  205. img_num+=1
  206. #make mp3
  207. text_parser = parser()
  208. txt_idx = 0
  209. for txt in text_content:
  210. rep_list = text_parser.replace_list(txt)
  211. for reptxt in rep_list:
  212. txt = txt.replace(reptxt,'')
  213. if lang!='zh' or multiLang==1:
  214. if lang!='zh':
  215. tts = gTTS(txt)
  216. tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
  217. else:
  218. tts = gTTS(txt,lang='zh-tw')
  219. tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
  220. #speed up
  221. ff = ffmpy.FFmpeg(inputs={dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3": None}
  222. , outputs={dir_sound+name_hash+"/"+str(txt_idx)+".mp3": ["-filter:a", "atempo=1.2"]})
  223. ff.run()
  224. os.remove(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
  225. else:
  226. print('use zhtts')
  227. tts = zhtts.TTS()
  228. tts.text2wav(txt,dir_sound+name_hash+"/"+str(txt_idx)+".mp3")
  229. txt_idx+=1
  230. print("mp3 file made")
  231. #make title as image
  232. txt2image_title(name, dir_title+name_hash+".png",lang)
  233. def txt2image(content, save_target,lang='zh'):
  234. unicode_text = trim_punctuation(content)
  235. font = ''
  236. if lang=='zh':
  237. font = ImageFont.truetype(font="font/DFT_B7.ttc", size=38)
  238. else :
  239. font = ImageFont.truetype(font="font/arial.ttf", size=38)
  240. text_width, text_height = font.getsize(unicode_text)
  241. canvas = Image.new('RGBA', (700, 500), (255, 0, 0, 0) )
  242. draw = ImageDraw.Draw(canvas)
  243. text= unicode_text
  244. draw.text((5,5), text, (255, 255, 0), font)
  245. canvas.save(save_target, "PNG")
  246. def txt2image_title(content, save_target, lang='zh'):
  247. unicode_text = trim_punctuation(content)
  248. font = ''
  249. if lang=='zh':
  250. font = ImageFont.truetype(font="font/DFT_B7.ttc", size=22)
  251. else :
  252. font = ImageFont.truetype(font="font/arial.ttf", size=22)
  253. text_width, text_height = font.getsize(unicode_text)
  254. canvas = Image.new('RGBA', (510, 500), (255, 0, 0, 0) )
  255. draw = ImageDraw.Draw(canvas)
  256. text= unicode_text
  257. draw.text((5,5), text, (17, 41, 167), font)
  258. canvas.save(save_target, "PNG")
  259. def call_anchor(fileName,avatar):
  260. conn = rpyc.classic.connect("192.168.1.111",18812)
  261. ros = conn.modules.os
  262. rsys = conn.modules.sys
  263. fr=open(dir_sound+fileName+".mp3",'rb')# voice
  264. #warning!!! file my be replaced by other process
  265. fw=conn.builtins.open('/tmp/output.mp3','wb')
  266. while True:
  267. b=fr.read(1024)
  268. if b:
  269. fw.write(b)
  270. else:
  271. break
  272. fr.close()
  273. fw.close()
  274. val=random.randint(1000000,9999999)
  275. ros.chdir('/home/jared/to_video')
  276. ros.system('./p'+str(avatar)+'.sh '+str(val)+' &')
  277. while True:
  278. print('waiting...')
  279. if ros.path.exists('/tmp/results/'+str(val)):
  280. break
  281. time.sleep(5)
  282. print('waiting...')
  283. fr=conn.builtins.open('/tmp/results/'+str(val)+'.mp4','rb')
  284. fw=open(dir_anchor+fileName+".mp4",'wb')
  285. while True:
  286. b=fr.read(1024)
  287. if b:
  288. fw.write(b)
  289. else:
  290. break
  291. fr.close()
  292. fw.close()
  293. def syllable_count(word):
  294. word = word.lower()
  295. count = 0
  296. vowels = "aeiouy"
  297. if word[0] in vowels:
  298. count += 1
  299. for index in range(1, len(word)):
  300. if word[index] in vowels and word[index - 1] not in vowels:
  301. count += 1
  302. if word.endswith("e"):
  303. count -= 1
  304. if count == 0:
  305. count += 1
  306. return count
def split_sentence(in_str, maxLen):
    """Split a mixed Chinese/English sentence into display-sized chunks.

    Returns a list of dicts ``{'content': str, 'time_ratio': float}`` where
    time_ratio is each chunk's share of the sentence's estimated speaking
    time (1 unit per CJK character, syllable_count(...)+0.5 per English
    word-run); the ratios across all chunks sum to 1.
    """
    re.findall(r'[\u4e00-\u9fff]+', in_str)  # NOTE(review): result unused — looks like leftover code
    # Classify each character position: CJK vs everything else.
    zh_idx = []
    eng_idx= []
    for i in range(len(in_str)):
        if in_str[i] > u'\u4e00' and in_str[i] < u'\u9fff':
            zh_idx.append(i)
        else:
            eng_idx.append(i)
    # Spaces are word separators, not English characters.
    space_index = [m.start() for m in re.finditer(' ', in_str)]
    for idx in space_index:
        eng_idx.remove(idx)
    # Group consecutive English indices into runs (one run per word/fragment);
    # the index-minus-position key is constant within a consecutive run.
    eng_range_list = []
    for k, g in groupby(enumerate(eng_idx), lambda ix : ix[0] - ix[1]):
        eng_range = list(map(itemgetter(1), g))
        eng_range_list.append(eng_range)
    # Estimated total speaking time of the whole sentence.
    total_syllable = 0
    for i in range(len(eng_range_list)):
        total_syllable += (syllable_count(in_str[eng_range_list[i][0]:eng_range_list[i][-1]+1])+0.5)
    for i in range(len(zh_idx)):
        total_syllable+=1
    #final chchchchchc[en][en][en]
    #[en] is a vocabulary dict with occurence of image
    # Build a token stream: a bare int for each CJK char, a list of indices
    # for each English run.
    # NOTE(review): consecutive spaces may leave i unadvanced here — confirm
    # inputs never contain runs of spaces.
    zh_eng_idx_list = []
    i = 0
    while i < len(in_str):
        if in_str[i]==' ':
            i+=1
        if i in zh_idx:
            zh_eng_idx_list.append(i)
            i+=1
        if i in eng_idx:
            for ls in eng_range_list:
                if i in ls:
                    zh_eng_idx_list.append(ls)
                    i = ls[-1]+1
                    break
    # Greedily pack tokens into chunks of at most maxLen length units,
    # recording each finished chunk's share of the total speaking time.
    zh_eng_dict_list = [{'content':'','time_ratio':0}]
    idx = 0
    current_len = 0
    sen_idx = 0
    while idx < len(zh_eng_idx_list):
        str_from_idx = ''
        sylla_cnt = 1
        if type(zh_eng_idx_list[idx])==type([]):
            # English run: append the word plus a trailing space.
            str_from_idx = in_str[zh_eng_idx_list[idx][0]:zh_eng_idx_list[idx][-1]+1]+' '
            sylla_cnt = syllable_count(str_from_idx)
        else:
            str_from_idx = in_str[zh_eng_idx_list[idx]]
        if len(zh_eng_dict_list[sen_idx]['content'])+sylla_cnt>=maxLen:
            # Chunk full: close it out and start the next one.
            zh_eng_dict_list[sen_idx]['time_ratio'] = current_len/total_syllable
            zh_eng_dict_list.append({'content':'','time_ratio':0})
            sen_idx+=1
            current_len = 0
        else:
            current_len += sylla_cnt
        zh_eng_dict_list[sen_idx]['content'] += str_from_idx
        idx+=1
    # The last chunk takes whatever ratio is left so the total is exactly 1.
    total_ratio = 0
    for obj in zh_eng_dict_list:
        total_ratio+=obj['time_ratio']
    zh_eng_dict_list[-1]['time_ratio'] = 1-total_ratio
    return zh_eng_dict_list
def parse_script(file_path,gt_list):
    """Parse an autosub-generated subtitle file and align it to the script.

    file_path: subtitle file laid out in 4-line blocks (sequence number,
    "start --> stop" time range, text, blank) — the ``idx * 4 + 1`` indexing
    below depends on that layout.
    gt_list: the ground-truth script sentences used to correct the
    recognized text.

    Returns a flat list of dicts with keys index, start, duration, content
    and, when the source sentence carried a "{n}" marker, image_obj.
    """
    with open(file_path, 'r',encoding="utf-8") as f:
        raw_lines = [line.strip() for line in f]
    # Replace each recognized line with its closest ground-truth sentence.
    lines = adjustSub_by_text_similarity(gt_list,raw_lines)
    text_parser = parser()
    #make dict
    dict_list = []
    for idx in range(len(lines)):
        script={}
        # Strip parser-reported tokens from the display text; the first one,
        # if present, encodes the image index as "{n}".
        rep_ls = text_parser.replace_list(lines[idx])
        line_content = lines[idx]
        for reptxt in rep_ls:
            line_content = line_content.replace(reptxt,'')
        if len(rep_ls)!=0:
            script['image_idx'] = int(rep_ls[0].replace('{','').replace('}',''))
        script['content'] = line_content
        # Timing comes from the raw block: "HH:MM:SS,mmm --> HH:MM:SS,mmm".
        time_raw = raw_lines[idx * 4 +1 ].split(' --> ')
        start = time_raw[0].split(':')
        stop = time_raw[1].split(':')
        script['start'] = float(start[0])*3600 + float(start[1])*60 + float(start[2].replace(',','.'))
        script['stop'] = float(stop[0])*3600 + float(stop[1])*60 + float(stop[2].replace(',','.'))
        dict_list.append(script)
    #merge duplicated sentences
    # A consecutive run of identical contents collapses into its first entry
    # ('lead_sentence', stop extended to the run's last stop); the rest are
    # marked 'duplicated' and skipped.
    skip_list = []
    script_not_dup_list = []
    for idx in range(len(dict_list)):
        if idx not in skip_list:
            dup_list = []
            found = 0
            for idx_inner in range(len(dict_list)):
                if dict_list[idx_inner]['content'] == dict_list[idx]['content'] and idx <= idx_inner:
                    dup_list.append(idx_inner)
                    skip_list.append(idx_inner)
                    found += 1
                # Stop at the first non-matching line after a match so only a
                # consecutive run is merged.
                if found != 0 and dict_list[idx_inner]['content']!=dict_list[idx]['content'] and idx <= idx_inner:
                    found = 0
                    break
            for dup_idx in dup_list:
                if dup_idx == min(dup_list):
                    dict_list[dup_idx]['type'] = 'lead_sentence'
                else:
                    dict_list[dup_idx]['type'] = 'duplicated'
            dict_list[dup_list[0]]['stop'] = dict_list[dup_list[-1]]['stop']
            if dict_list[idx]['type'] == 'lead_sentence':
                script_not_dup_list.append(dict_list[idx])
    # Split each merged sentence into chunks of <= 13 length units and spread
    # its duration across them by estimated speaking time.
    new_idx = 0
    splitted_dict = []
    for dic in script_not_dup_list:
        dic_idx = 0  # NOTE(review): never used
        accumulated_duration = 0
        duration = dic['stop']-dic['start']
        for sub_dic in split_sentence(dic['content'],13):
            new_dic = {}
            new_dic['index'] = new_idx
            if 'image_idx' in dic:
                new_dic['image_obj'] = {'start':dic['start'],'idx':dic['image_idx']}
            new_idx+=1
            ind_duration = duration * sub_dic['time_ratio']
            new_dic['start'] = dic['start'] + accumulated_duration
            accumulated_duration += ind_duration
            new_dic['content'] = sub_dic['content']
            # Shown for only 70% of the allotted slice — presumably to keep
            # consecutive subtitles from overlapping; confirm intent.
            new_dic['duration'] = ind_duration*0.7
            splitted_dict.append(new_dic)
    return splitted_dict
def adjustSub_by_text_similarity(gts_in,gens_raw):
    """Map each machine-recognized subtitle line to its closest script sentence.

    gts_in: ground-truth sentences (may contain parser markup tokens).
    gens_raw: raw subtitle-file lines in 4-line blocks; the text of block i
    sits at index i*4+2.

    Returns one ground-truth sentence (with its original markup) per
    generated line; lines with no acceptable match become ' '.
    """
    #call by value only
    gts = gts_in[:]
    # Strip parser markup so similarity is computed on plain text.
    text_parser = parser()
    for i in range(len(gts)):
        rep_ls = text_parser.replace_list(gts[i])
        for reptxt in rep_ls:
            gts[i] = gts[i].replace(reptxt,'')
    # Pull the text line out of every 4-line block.
    gens = []
    for idx in range(int((len(gens_raw)+1)/4)):
        gens.append(gens_raw[idx*4+2])
    # 2-/3-sentence concatenations were tried and disabled (kept in the
    # commented-out additions below).
    combine2 = [''.join([i,j]) for i,j in zip(gts, gts[1:])]
    combine3 = [''.join([i,j,k]) for i,j,k in zip(gts, gts[1:], gts[2:])]
    alls = gts #+ combine2 + combine3
    adjusted = [None]*len(gens)
    duplicated_list = []
    for idx in range(len(gens)):
        match_text = difflib.get_close_matches(gens[idx], alls, cutoff=0.1)
        if len(match_text) != 0:
            if match_text[0] not in duplicated_list:
                adjusted[idx] = match_text[0]
                duplicated_list.append(match_text[0])
            else:
                # Best match already used: keep it only when it repeats the
                # previous line; otherwise fall back to the next unused match.
                if match_text[0] == adjusted[idx-1]:
                    adjusted[idx] = match_text[0]
                else:
                    found = 0
                    for mt in match_text:
                        if mt not in duplicated_list:
                            adjusted[idx] = mt
                            found += 1
                            break
                    if found ==0:
                        adjusted[idx] = ' '
        else :
            adjusted[idx] = ' '
    combine2_tag = [''.join([i,j]) for i,j in zip(gts_in, gts_in[1:])]
    combine3_tag = [''.join([i,j,k]) for i,j,k in zip(gts_in, gts_in[1:], gts_in[2:])]
    alls_tag = gts_in #+ combine2_tag + combine3_tag
    # Second pass: map the cleaned matches back to the original
    # (markup-bearing) sentences.
    # NOTE(review): if get_close_matches returns [] here (possible for the
    # ' ' placeholders), match_text[0] raises IndexError — confirm inputs.
    for idx in range(len(adjusted)):
        match_text = difflib.get_close_matches(adjusted[idx], alls_tag, cutoff=0.1)
        adjusted[idx] = match_text[0]
    return adjusted
  477. def trim_punctuation(s):
  478. pat_block = u'[^\u4e00-\u9fff0-9a-zA-Z]+';
  479. pattern = u'([0-9]+{0}[0-9]+)|{0}'.format(pat_block)
  480. res = re.sub(pattern, lambda x: x.group(1) if x.group(1) else u" " ,s)
  481. return res
  482. def splitter(s):
  483. for sent in re.findall(u'[^!?,。\!\?]+[!? 。\!\?]?', s, flags=re.U):
  484. yield sent
  485. def split_by_pun(s):
  486. res = list(splitter(s))
  487. return res
  488. def generate_subtitle_image_from_dict(name_hash, sub_dict):
  489. for script in sub_dict:
  490. sv_path = dir_subtitle + name_hash + '/' + str(script['index'])+'.png'
  491. sub = script['content']
  492. txt2image(sub,sv_path)
def generate_subtitle_image(name_hash,text_content):
    """Render subtitle PNGs for every punctuation-split sentence of each block.

    For text block idx and sentence inner_idx, writes
    subtitle/<name_hash>/<idx><inner_idx>.png and records the sentence's
    estimated speaking time.  Returns, per text block, a list of dicts
    ``{"count": <syllable estimate>, "path": <png path>}``.

    NOTE(review): the syllable-estimation below duplicates the logic in
    split_sentence — consider factoring it out.
    """
    img_list = [None]*len(text_content)
    for idx in range(len(text_content)):
        img_list[idx]=[]
        senList = split_by_pun(text_content[idx])
        for inner_idx in range(len(senList)):
            sv_path = dir_subtitle + name_hash +'/'+str(idx)+ str(inner_idx) +'.png'
            sub = senList[inner_idx]
            txt2image(sub,sv_path)
            clean_content = trim_punctuation(sub)
            re.findall(r'[\u4e00-\u9fff]+', clean_content)  # NOTE(review): result unused
            # Classify each character position: CJK vs everything else.
            zh_idx = []
            eng_idx= []
            for i in range(len(clean_content)):
                if clean_content[i] > u'\u4e00' and clean_content[i] < u'\u9fff':
                    zh_idx.append(i)
                else:
                    eng_idx.append(i)
            # Spaces are separators, not English characters.
            space_index = [m.start() for m in re.finditer(' ', clean_content)]
            for s_idx in space_index:
                eng_idx.remove(s_idx)
            # Group consecutive English indices into word runs.
            eng_range_list = []
            for k, g in groupby(enumerate(eng_idx), lambda ix : ix[0] - ix[1]):
                eng_range = list(map(itemgetter(1), g))
                eng_range_list.append(eng_range)
            # Speaking-time estimate: syllables+0.5 per English run, 1 per CJK char.
            total_syllable = 0
            for i in range(len(eng_range_list)):
                total_syllable += (syllable_count(clean_content[eng_range_list[i][0]:eng_range_list[i][-1]+1])+0.5)
            for i in range(len(zh_idx)):
                total_syllable+=1
            img_list[idx]+=[{"count":total_syllable,"path":sv_path}]
    return img_list
  525. def generate_subtitle_image_ENG(name_hash,text_content):
  526. img_list = [None]*len(text_content)
  527. for idx in range(len(text_content)):
  528. sv_path = dir_subtitle + name_hash +'/'+str(idx)+'.png'
  529. sub = text_content[idx]
  530. txt2image(sub, sv_path,lang='eng')
  531. img_list[idx] = sv_path
  532. return img_list
  533. def video_writer_init(path):
  534. w = openshot.FFmpegWriter(path)
  535. w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
  536. w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
  537. openshot.Fraction(1, 1), False, False, 3000000)
  538. return w
def video_gen(name_hash,name,text_content, image_urls,multiLang,avatar):
    """End-to-end pipeline producing tmp_video/<name_hash>.mp4 for one script.

    Renders in two passes:
      1. opening logo + narration + closing logo -> <name_hash>raw.mp4
      2. autosub transcribes that raw cut; the transcript is aligned to the
         script, subtitle PNGs plus per-script images and the keyed anchor
         are overlaid -> <name_hash>.mp4; intermediates are deleted.
    """
    # Download assets, synthesize narration, then render anchor clips remotely.
    file_prepare_long(name, name_hash, text_content,image_urls,multiLang)
    for fname in range(len(text_content)):
        call_anchor(name_hash+"/"+str(fname),avatar)
    print('called............................................')
    # Chroma keys: pure green for backgrounds, a slightly different green for
    # the rendered anchor footage.
    ck=cKey(0,254,0,270)
    ck_anchor=cKey(0,255,1,320)
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    # --- opening: logo animation over the keyed "head" background ---
    LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
    LOGO_OP.Open() # Open the reader
    head_duration = LOGO_OP.info.duration
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP,layer=4,position=0,end=head_duration
    ,location_y=-0.03,scale_x=0.8,scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head,layer=2,position=0,end=LOGO_OP.info.duration,ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += head_duration
    bg_head.Close()
    LOGO_OP.Close()
    # --- body: narration audio only; the anchor overlay happens in pass 2,
    # but the anchor clip's duration still paces the timeline ---
    anchor = openshot.FFmpegReader(dir_anchor+name_hash+"/0.mp4")
    anchor.Open()
    #anchor_clip = video_photo_clip(vid=anchor,layer=4,scale_x=0.65,scale_y=0.65,
    #   location_x=0.35,location_y=0.25,position=main_timer, end=anchor.info.duration,ck=ck_anchor,audio=False)
    #t.AddClip(anchor_clip)
    speech = openshot.FFmpegReader(dir_sound+name_hash+"/0.mp3")
    speech.Open()
    speech_clip = openshot.Clip(speech)
    speech_clip.Position(main_timer)
    speech_clip.End(anchor.info.duration)
    t.AddClip(speech_clip)
    main_timer += anchor.info.duration
    anchor.Close()
    speech.Close()
    # --- closing logo ---
    LOGO_ED = openshot.FFmpegReader(dir_video+"LOGO_ED.avi")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED,layer=4,position=main_timer,end=LOGO_ED.info.duration
    ,location_x=0.005,location_y=-0.031, scale_x=0.8,scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    main_timer += LOGO_ED.info.duration
    LOGO_ED.Close()
    # Tile the looping keyed background under the body: bg_times full loops
    # plus one partial loop for the remainder.
    bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg.Open()
    bg_times = math.floor(main_timer/bg.info.duration)
    left_time = (main_timer) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx],layer=2,position=bg_timer,end=bg_list[idx].info.duration,ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left,layer=2,position=bg_timer,end=left_time,ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    # Title overlay stays on screen for the whole program.
    title = openshot.QtImageReader(dir_title+name_hash+".png")
    title.Open() # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4,location_x=-0.047, location_y=0.801,position=0,end=head_duration+main_timer)
    t.AddClip(title_clip)
    # Render pass 1 frame by frame.
    w = video_writer_init(tmp_video_dir+name_hash+"raw.mp4")
    w.Open()
    frames = int(t.info.fps)*int(main_timer)
    for n in range(frames):
        f=t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    print(name+"RAW DONE : www.choozmo.com:8168/"+tmp_video_dir+name_hash+"raw.mp4")
    #start adding sub
    #add sub
    # Transcribe the raw cut, align it with the first text block's sentences,
    # and render one subtitle PNG per (split) sentence.
    Ctr_Autosub.init()
    Ctr_Autosub.generate_subtitles(tmp_video_dir+name_hash+"raw.mp4",'zh',listener_progress,output=tmp_video_dir+name_hash+"script.txt",concurrency=DEFAULT_CONCURRENCY,subtitle_file_format=DEFAULT_SUBTITLE_FORMAT)
    sub_dict = parse_script(tmp_video_dir+name_hash+"script.txt",split_by_pun(text_content[0]))
    for subd in sub_dict:
        print(subd)
    generate_subtitle_image_from_dict(name_hash, sub_dict)
    #sv_path = dir_subtitle + name_hash + '/' + str(script['index'])+'.png'
    # --- pass 2: overlay subtitles, scheduled images and the anchor on the raw cut ---
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    raw = openshot.FFmpegReader(tmp_video_dir+name_hash+"raw.mp4")
    raw.Open()
    raw_clip = video_photo_clip(vid=raw,layer=2,position=0, end=raw.info.duration)
    t.AddClip(raw_clip)
    sub_img_list = [None] * len(sub_dict)
    sub_clip_list = [None] * len(sub_dict)
    for sub_obj in sub_dict:
        idx = int(sub_obj['index'])
        sub_img_list[idx] = openshot.QtImageReader(dir_subtitle + name_hash + '/' + str(idx)+'.png')
        sub_img_list[idx].Open()
        #if sub_obj['duration']>3:
        #   print('warning')
        #print('start:',sub_obj['start'],', duration :', sub_obj['duration'],' content',sub_obj['content'],'idx:',sub_obj['index'])
        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6,location_x=0.069, location_y=0.89,position=sub_obj['start'],end=math.ceil(sub_obj['duration']))
        t.AddClip(sub_clip_list[idx])
        sub_img_list[idx].Close()
    # Place each downloaded photo/video according to the parser's schedule.
    tp = parser()
    img_dict_ls = tp.image_clip_info(sub_dict)
    img_clip_list = [None]*len(listdir(dir_photo+name_hash))
    img_list = [None]*len(img_clip_list)
    img_file_ls = listdir(dir_photo+name_hash)
    for img_idx in range(len(img_file_ls)):
        img_list[img_idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+img_file_ls[img_idx])
        img_list[img_idx].Open()
        img_clip_list[img_idx] = video_photo_clip(vid=img_list[img_idx],layer=3
        ,scale_x=0.81,scale_y=0.68,location_y=-0.03,position=img_dict_ls[img_idx]['start'],end=img_dict_ls[img_idx]['duration'],audio=False)
        t.AddClip(img_clip_list[img_idx])
        img_list[img_idx].Close()
    # Key the anchor over everything, muted (its audio is already in the raw cut).
    anchor = openshot.FFmpegReader(dir_anchor+name_hash+"/0.mp4")
    anchor.Open()
    anchor_clip = video_photo_clip(vid=anchor,layer=4,scale_x=0.65,scale_y=0.65,
    location_x=0.35,location_y=0.25,position=head_duration, end=anchor.info.duration,ck=ck_anchor,audio=False)
    t.AddClip(anchor_clip)
    # Render pass 2, then remove the intermediates.
    w = video_writer_init(tmp_video_dir+name_hash+".mp4")
    w.Open()
    frames = int(t.info.fps)*int(main_timer)
    for n in range(frames):
        f=t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    os.remove(tmp_video_dir+name_hash+"raw.mp4")
    os.remove(tmp_video_dir+name_hash+"script.txt")
    print(name+"ALL DONE : www.choozmo.com:8168/"+video_sub_folder+name_hash+"raw.mp4")
  671. def anchor_video_v2(name_hash,name,text_content, image_urls,multiLang,avatar,freeTrial):
  672. print(name)
  673. print(text_content)
  674. print(os.getcwd())
  675. print('sub image made')
  676. print(multiLang)
  677. file_prepare(name, name_hash, text_content,image_urls,multiLang)
  678. sub_list=generate_subtitle_image(name_hash,text_content)
  679. for fname in range(len(text_content)):
  680. call_anchor(name_hash+"/"+str(fname),avatar)
  681. print('step finish')
  682. print('called............................................')
  683. ck=cKey(0,254,0,270)
  684. ck_anchor=cKey(0,255,1,320)
  685. duration = 0
  686. #average layer level is 3
  687. t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
  688. t.Open()
  689. main_timer = 0
  690. LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
  691. LOGO_OP.Open() # Open the reader
  692. LOGO_OP_clip = video_photo_clip(vid=LOGO_OP,layer=4,position=0,end=LOGO_OP.info.duration
  693. ,location_y=-0.03,scale_x=0.8,scale_y=0.704)
  694. t.AddClip(LOGO_OP_clip)
  695. bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
  696. bg_head.Open()
  697. bg_head_clip = video_photo_clip(vid=bg_head,layer=2,position=0,end=LOGO_OP.info.duration,ck=ck)
  698. t.AddClip(bg_head_clip)
  699. main_timer += LOGO_OP.info.duration
  700. head_duration = LOGO_OP.info.duration
  701. bg_head.Close()
  702. LOGO_OP.Close()
  703. clip_duration=0
  704. photo_clip_list = [None]*len(text_content)
  705. img_list = [None]*len(text_content)
  706. anchor_clip_list = [None] * len(text_content)
  707. anchor_list = [None] * len(text_content)
  708. audio_clip_list = [None] * len(text_content)
  709. audio_list = [None] * len(text_content)
  710. sub_clip_list = [None] * len(text_content)
  711. sub_img_list = [None] * len(text_content)
  712. idx = 0
  713. for p in listdir(dir_photo+name_hash):
  714. anchor_list[idx] = openshot.FFmpegReader(dir_anchor+name_hash+"/"+str(idx)+".mp4")
  715. clip_duration = anchor_list[idx].info.duration
  716. anchor_list[idx].Open()
  717. anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx],layer=4,scale_x=0.65,scale_y=0.65,
  718. location_x=0.35,location_y=0.25,position=main_timer, end=clip_duration,ck=ck_anchor,audio=False)
  719. print('avatar is ', avatar)
  720. t.AddClip(anchor_clip_list[idx])
  721. img_list[idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+p)
  722. img_list[idx].Open()
  723. photo_clip_list[idx] = video_photo_clip(vid=img_list[idx],layer=3
  724. ,scale_x=0.8,scale_y=0.6825,location_y=-0.03,position=main_timer,end=clip_duration,audio=False)
  725. t.AddClip(photo_clip_list[idx])
  726. img_list[idx].Close()
  727. audio_list[idx] = openshot.FFmpegReader(dir_sound+name_hash+"/"+str(idx)+".mp3")
  728. audio_list[idx].Open()
  729. audio_clip_list[idx] = openshot.Clip(audio_list[idx])
  730. audio_clip_list[idx].Position(main_timer)
  731. audio_clip_list[idx].End(clip_duration)
  732. t.AddClip(audio_clip_list[idx])
  733. img_list[idx].Close()
  734. anchor_list[idx].Close()
  735. audio_list[idx].Close()
  736. sub_img_list[idx] = [None] * len(sub_list[idx])
  737. sub_clip_list[idx] = [None] * len(sub_list[idx])
  738. sub_timer = 0
  739. for sub_idx in range(len(sub_list[idx])):
  740. sub_img_list[idx][sub_idx] = openshot.QtImageReader(sub_list[idx][sub_idx]['path'])
  741. sub_img_list[idx][sub_idx].Open()
  742. sub_duration = 0.205*sub_list[idx][sub_idx]['count']
  743. sub_clip_list[idx][sub_idx] = video_photo_clip(vid=sub_img_list[idx][sub_idx], layer=6,location_x=0.069, location_y=0.89,position=main_timer+sub_timer,end=sub_duration)
  744. t.AddClip(sub_clip_list[idx][sub_idx])
  745. sub_img_list[idx][sub_idx].Close()
  746. sub_timer += sub_duration
  747. print(sub_list[idx][sub_idx]['path'])
  748. main_timer += clip_duration
  749. idx+=1
  750. LOGO_ED = openshot.FFmpegReader(dir_video+"LOGO_ED.avi")
  751. LOGO_ED.Open()
  752. LOGO_ED_clip = video_photo_clip(vid=LOGO_ED,layer=4,position=main_timer,end=LOGO_ED.info.duration+2
  753. ,location_x=0.005,location_y=-0.031
  754. ,scale_x=0.8,scale_y=0.6825)
  755. t.AddClip(LOGO_ED_clip)
  756. ED_duration = LOGO_ED.info.duration
  757. LOGO_ED.Close()
  758. bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
  759. bg.Open()
  760. bg_times = math.floor(main_timer+ED_duration/bg.info.duration)
  761. left_time = (main_timer+ED_duration) % bg.info.duration
  762. bg_clip_list = [None] * bg_times
  763. bg_list = [None] * bg_times
  764. bg.Close()
  765. bg_timer = head_duration
  766. for idx in range(bg_times):
  767. bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
  768. bg_list[idx].Open()
  769. bg_clip_list[idx] = video_photo_clip(bg_list[idx],layer=2,position=bg_timer
  770. ,end=bg_list[idx].info.duration,ck=ck)
  771. t.AddClip(bg_clip_list[idx])
  772. bg_timer += bg_list[idx].info.duration
  773. bg_list[idx].Close()
  774. bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
  775. bg_left.Open()
  776. bg_left_clip = video_photo_clip(bg_left,layer=2,position=bg_timer,end=left_time,ck=ck)
  777. t.AddClip(bg_left_clip)
  778. bg_left.Close()
  779. title = openshot.QtImageReader(dir_title+name_hash+".png")
  780. title.Open() # Open the reader
  781. title_clip = video_photo_clip(vid=title, layer=4,location_x=-0.047, location_y=0.801,position=0,end=head_duration+main_timer)
  782. t.AddClip(title_clip)
  783. if freeTrial==1:
  784. print("THIS IS TRIAL")
  785. wm = openshot.QtImageReader(dir_video+"freeTrialWatermark.png")
  786. wm.Open()
  787. wm_clip = video_photo_clip(wm,layer=6,position=0,end=int(head_duration+main_timer+ED_duration))
  788. #t.AddClip(wm_clip)
  789. else:
  790. print("THIS IS NOT TRIAL")
  791. print(freeTrial)
  792. ####start building
  793. w = openshot.FFmpegWriter(tmp_video_dir+name_hash+".mp4")
  794. w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
  795. w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
  796. openshot.Fraction(1, 1), False, False, 3000000)
  797. w.Open()
  798. #may change duration into t.info.duration
  799. frames = int(t.info.fps)*int(head_duration+main_timer+ED_duration)
  800. for n in range(frames):
  801. f=t.GetFrame(n)
  802. w.WriteFrame(f)
  803. #notify_group(name+"的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
  804. t.Close()
  805. w.Close()
  806. print("video at : www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
  807. def anchor_video_eng(name_hash,name,text_content, image_urls,sub_titles,avatar,freeTrial):
  808. file_prepare(name, name_hash, text_content,image_urls,1,'eng')
  809. sub_list=generate_subtitle_image_ENG(name_hash,sub_titles)
  810. for fname in range(len(text_content)):
  811. call_anchor(name_hash+"/"+str(fname),avatar)
  812. print('step finish')
  813. print('called............................................')
  814. ck=cKey(0,254,0,270)
  815. ck_anchor=cKey(0,255,1,320)
  816. duration = 0
  817. #average layer level is 3
  818. t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
  819. t.Open()
  820. main_timer = 0
  821. #add logo
  822. LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
  823. LOGO_OP.Open() # Open the reader
  824. LOGO_OP_clip = video_photo_clip(vid=LOGO_OP,layer=4,position=0,end=LOGO_OP.info.duration
  825. ,location_y=-0.03,scale_x=0.8,scale_y=0.704)
  826. t.AddClip(LOGO_OP_clip)
  827. #add background video (head is different)
  828. bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
  829. bg_head.Open()
  830. bg_head_clip = video_photo_clip(vid=bg_head,layer=2,position=0,end=LOGO_OP.info.duration,ck=ck)
  831. t.AddClip(bg_head_clip)
  832. main_timer += LOGO_OP.info.duration
  833. head_duration = LOGO_OP.info.duration
  834. bg_head.Close()
  835. LOGO_OP.Close()
  836. #prepare empty list
  837. clip_duration=0
  838. photo_clip_list = [None]*len(text_content)
  839. img_list = [None]*len(text_content)
  840. anchor_clip_list = [None] * len(text_content)
  841. anchor_list = [None] * len(text_content)
  842. audio_clip_list = [None] * len(text_content)
  843. audio_list = [None] * len(text_content)
  844. sub_clip_list = [None] * len(text_content)
  845. #openshot image holder
  846. sub_img_list = [None] * len(text_content)
  847. idx = 0
  848. for p in listdir(dir_photo+name_hash):
  849. anchor_list[idx] = openshot.FFmpegReader(dir_anchor+name_hash+"/"+str(idx)+".mp4")
  850. clip_duration = anchor_list[idx].info.duration
  851. anchor_list[idx].Open()
  852. anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx],layer=4,scale_x=0.65,scale_y=0.65,
  853. location_x=0.35,location_y=0.25,position=main_timer, end=clip_duration,ck=ck_anchor,audio=False)
  854. t.AddClip(anchor_clip_list[idx])
  855. #insert image
  856. img_list[idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+p)
  857. img_list[idx].Open()
  858. photo_clip_list[idx] = video_photo_clip(vid=img_list[idx],layer=3
  859. ,scale_x=0.81,scale_y=0.68,location_y=-0.03,position=main_timer,end=clip_duration,audio=False)
  860. t.AddClip(photo_clip_list[idx])
  861. img_list[idx].Close()
  862. #insert audio (speech)
  863. audio_list[idx] = openshot.FFmpegReader(dir_sound+name_hash+"/"+str(idx)+".mp3")
  864. audio_list[idx].Open()
  865. audio_clip_list[idx] = openshot.Clip(audio_list[idx])
  866. audio_clip_list[idx].Position(main_timer)
  867. audio_clip_list[idx].End(clip_duration)
  868. t.AddClip(audio_clip_list[idx])
  869. #insert subtitle
  870. sub_img_list[idx] = openshot.QtImageReader(sub_list[idx])
  871. sub_img_list[idx].Open()
  872. sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6,location_x=0.069, location_y=0.89,position=main_timer,end=clip_duration)
  873. t.AddClip(sub_clip_list[idx])
  874. img_list[idx].Close()
  875. anchor_list[idx].Close()
  876. audio_list[idx].Close()
  877. sub_img_list[idx].Close()
  878. main_timer += clip_duration
  879. idx+=1
  880. LOGO_ED = openshot.FFmpegReader(dir_video+"ED_ENG.mp4")
  881. LOGO_ED.Open()
  882. LOGO_ED_clip = video_photo_clip(vid=LOGO_ED,layer=4,position=main_timer,end=LOGO_ED.info.duration+2
  883. ,location_x=0.005,location_y=-0.031
  884. ,scale_x=0.8,scale_y=0.6825)
  885. t.AddClip(LOGO_ED_clip)
  886. ED_duration = LOGO_ED.info.duration
  887. LOGO_ED.Close()
  888. bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
  889. bg.Open()
  890. bg_times = math.floor(main_timer+ED_duration/bg.info.duration)
  891. left_time = (main_timer+ED_duration) % bg.info.duration
  892. bg_clip_list = [None] * bg_times
  893. bg_list = [None] * bg_times
  894. bg.Close()
  895. bg_timer = head_duration
  896. for idx in range(bg_times):
  897. bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
  898. bg_list[idx].Open()
  899. bg_clip_list[idx] = video_photo_clip(bg_list[idx],layer=2,position=bg_timer
  900. ,end=bg_list[idx].info.duration,ck=ck)
  901. t.AddClip(bg_clip_list[idx])
  902. bg_timer += bg_list[idx].info.duration
  903. bg_list[idx].Close()
  904. bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
  905. bg_left.Open()
  906. bg_left_clip = video_photo_clip(bg_left,layer=2,position=bg_timer,end=left_time,ck=ck)
  907. t.AddClip(bg_left_clip)
  908. bg_left.Close()
  909. title = openshot.QtImageReader(dir_title+name_hash+".png")
  910. title.Open() # Open the reader
  911. title_clip = video_photo_clip(vid=title, layer=4,location_x=-0.047, location_y=0.801,position=0,end=head_duration+main_timer)
  912. t.AddClip(title_clip)
  913. if freeTrial==1:
  914. wm = openshot.QtImageReader(dir_video+"freeTrialWatermark.png")
  915. wm.Open()
  916. wm_clip = video_photo_clip(wm,layer=6,position=0,end=int(head_duration+main_timer+ED_duration))
  917. #t.AddClip(wm_clip)
  918. print("THIS IS TRIAL")
  919. else:
  920. print("THIS IS NOT TRIAL")
  921. print(freeTrial)
  922. ####start building
  923. w = openshot.FFmpegWriter(tmp_video_dir+name_hash+".mp4")
  924. w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
  925. w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
  926. openshot.Fraction(1, 1), False, False, 3000000)
  927. w.Open()
  928. #may change duration into t.info.duration
  929. frames = int(t.info.fps)*int(head_duration+main_timer+ED_duration)
  930. for n in range(frames):
  931. f=t.GetFrame(n)
  932. w.WriteFrame(f)
  933. #notify_group(name+"(ENG)的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
  934. t.Close()
  935. w.Close()
  936. print("video at : www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
  937. #line notifs
  938. import pyttsx3
  939. #def make_speech(text):
  940. #engine = pyttsx3.init()
  941. ##voices = engine.getProperty('voices')
  942. #engine.setProperty('voice', 'Mandarin')
  943. #engine.save_to_file(text, '/app/speech.mp3')
  944. #engine.runAndWait()
  945. import json
# Request payload
# For the available voices, see https://github.com/playht/text-to-speech-api/blob/master/Voices.md
  948. def make_speech(text,output="/app/speech.mp3",voice="zh-CN-XiaoyouNeural"):
  949. my_data = {
  950. "voice": voice,
  951. "content": [str(text)] #["你好,很高興認識你","喜歡","討厭"]
  952. # "ssml": string[]
  953. # "title": string, // Optional
  954. # "narrationStyle": string, // Optional
  955. # "globalSpeed": string, // Optional
  956. # "pronunciations": { key: string, value: string }[], // Optional
  957. # "trimSilence": boolean, // Optional
  958. }
  959. headers = {
  960. # 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36',
  961. "Authorization":"84e1df1b61114e75b134b5ec496b8922",
  962. "X-User-ID":'HEQLQR1WgpYtN0SEyKoWBsLiZXX2',
  963. "Content-Type": "application/json"
  964. }
  965. # 將資料加入 POST 請求中
  966. r = requests.post('https://play.ht/api/v1/convert',headers=headers,data=json.dumps(my_data))
  967. c1 = r.json()['transcriptionId']
  968. # print(c1)
  969. time.sleep(len(text))
  970. r = requests.get('https://play.ht/api/v1/articleStatus?transcriptionId=%s'%c1, headers=headers)
  971. # print(r.status_code)
  972. file = requests.get(r.json()['audioUrl'])
  973. with open(output,"wb") as f:
  974. for chunk in file.iter_content(chunk_size=1024):
  975. if chunk:
  976. f.write(chunk)
  977. class video_service(rpyc.Service):
  978. def exposed_call_video(self,name_hash,name,text_content, image_urls,multiLang,avatar,freeTrial):
  979. print('ML:'+str(multiLang))
  980. anchor_video_v2(name_hash,name,text_content, image_urls,multiLang,avatar,freeTrial)
  981. def exposed_call_video_eng(self,name_hash,name,text_content, image_urls,sub_titles,avatar,freeTrial):
  982. anchor_video_eng(name_hash,name,text_content, image_urls,sub_titles,avatar,freeTrial)
  983. def exposed_call_video_gen(self,name_hash,name,text_content, image_urls,multiLang,avatar):
  984. print('ML:'+str(multiLang))#this is long video version,
  985. video_gen(name_hash,name,text_content, image_urls,multiLang,avatar)
  986. def exposed_make_speech(self,text):
  987. make_speech(text)
# Module entry point: serve video_service over RPyC on port 8858.
# NOTE(review): this runs at import time with no `if __name__ == "__main__":`
# guard — importing this module anywhere will start (and block on) the server.
from rpyc.utils.server import ThreadedServer
t = ThreadedServer(video_service, port=8858)
# Printed before start() because start() blocks for the lifetime of the service.
print('service started')
t.start()