# openshot_video_generator.py

from os import listdir
from os.path import isfile, isdir, join
import openshot
import threading
import zhtts
import os
import urllib
from typing import List
import requests
from pydantic import BaseModel
from bs4 import BeautifulSoup
from PIL import Image, ImageDraw, ImageFont
import pyttsx3
import rpyc
import random
import re
import time
import math
import dataset
from datetime import datetime
from gtts import gTTS
import ffmpy
from difflib import SequenceMatcher
import difflib
from autosub import DEFAULT_CONCURRENCY
from autosub import DEFAULT_SUBTITLE_FORMAT
from pytranscriber.control.ctr_main import Ctr_Main
from pytranscriber.control.ctr_autosub import Ctr_Autosub
import multiprocessing
from itertools import groupby
from operator import itemgetter
from openUtil.parser import parser
import pandas as pd
import numpy as np
import jieba
import jieba.posseg as pseg
import urllib.request
import librosa
from pydub import AudioSegment
from pydub.silence import split_on_silence
import itertools
from hakkaUtil import *
dir_sound = 'mp3_track/'
dir_photo = 'photo/'
dir_text = 'text_file/'
dir_video = 'video_material/'
dir_title = 'title/'
dir_subtitle = 'subtitle/'
dir_anchor = 'anchor_raw/'
tmp_video_dir = 'tmp_video/'
video_sub_folder = 'ai_anchor_video/'
dir_list = [dir_sound, dir_photo, dir_text, dir_video, dir_title, dir_subtitle, dir_anchor, tmp_video_dir]
def notify_group(msg):
    glist = ['7vilzohcyQMPLfAMRloUawiTV4vtusZhxv8Czo7AJX8', 'WekCRfnAirSiSxALiD6gcm0B56EejsoK89zFbIaiZQD', '1dbtJHbWVbrooXmQqc4r8OyRWDryjD4TMJ6DiDsdgsX', 'HOB1kVNgIb81tTB4Ort1BfhVp9GFo6NlToMQg88vEhh']
    for gid in glist:
        headers = {
            "Authorization": "Bearer " + gid,
            "Content-Type": "application/x-www-form-urlencoded"
        }
        params = {"message": msg}
        r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params)
def cKey(r, g, b, fuzz):
    col = openshot.Color()
    col.red = openshot.Keyframe(r)
    col.green = openshot.Keyframe(g)
    col.blue = openshot.Keyframe(b)
    return openshot.ChromaKey(col, openshot.Keyframe(fuzz))
def video_photo_clip(vid=None, layer=None, position=None, end=None,
                     scale_x=1, scale_y=1, location_x=0, location_y=0, ck=None, audio=True):
    clip = openshot.Clip(vid)
    clip.Layer(layer)
    clip.Position(position)
    clip.End(end)
    clip.scale_x = openshot.Keyframe(scale_x)
    clip.scale_y = openshot.Keyframe(scale_y)
    clip.location_x = openshot.Keyframe(location_x)
    clip.location_y = openshot.Keyframe(location_y)
    if ck is not None:
        clip.AddEffect(ck)
    if audio == True:
        clip.has_audio = openshot.Keyframe(1)
    else:
        clip.has_audio = openshot.Keyframe(0)
    return clip
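# Illustrative sketch (not called anywhere in this module): cKey() and
# video_photo_clip() compose into a timeline the same way the longer builders
# below do. The background file name here is a hypothetical placeholder.
#
#   green = cKey(0, 254, 0, 270)        # chroma-key out pure green
#   tl = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000),
#                          44100, 2, openshot.LAYOUT_STEREO)
#   tl.Open()
#   bg = openshot.FFmpegReader('video_material/some_background.mp4')
#   bg.Open()
#   tl.AddClip(video_photo_clip(vid=bg, layer=2, position=0,
#                               end=bg.info.duration, ck=green))
#   bg.Close()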
def listener_progress(string, percent):
    # progress callback required by Ctr_Autosub.generate_subtitles(); intentionally a no-op
    pass
def myunichchar(unicode_char):
    mb_string = unicode_char.encode('big5')
    try:
        unicode_char = unichr(ord(mb_string[0]) << 8 | ord(mb_string[1]))
    except NameError:
        unicode_char = chr(mb_string[0] << 8 | mb_string[1])
    return unicode_char
def get_url_type(url):
    print('---------------------------------------------')
    print(url)
    req = urllib.request.Request(url, method='HEAD', headers={'User-Agent': 'Mozilla/5.0'})
    r = urllib.request.urlopen(req)
    contentType = r.getheader('Content-Type')
    print(contentType)
    print('-------------------------------------------------')
    return contentType
def make_dir(name_hash):
    for direct in dir_list:
        if not os.path.isdir(direct):
            os.mkdir(direct)
    try:
        os.mkdir(dir_photo + name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_photo + name_hash, " already exists")
    try:
        os.mkdir(dir_text + name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_text + name_hash, " already exists")
    try:
        os.mkdir(dir_sound + name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_sound + name_hash, " already exists")
    try:
        os.mkdir(dir_anchor + name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_anchor + name_hash, " already exists")
    try:
        os.mkdir(dir_subtitle + name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_subtitle + name_hash, " already exists")
def hakkaTTS(mp3_path, ch_sentence, gender):
    download = False  # set to True only when the pronunciation data still needs to be downloaded
    hakka_100 = import_hakka_100()
    word_data, multi_sound = import_data()
    if download:
        download_mp3(word_data, multi_sound)
        download_hakka_100(hakka_100)
    ch_word_list = list(itertools.chain(*word_data['華語詞義集'].tolist())) + hakka_100.chinese_clean.tolist()
    import_jieba_userdict(ch_word_list=ch_word_list, userDict_path='userDict.txt')
    gen_hakka_tts(word_data, multi_sound, hakka_100, ch_sentence, gender, mp3_path)
def file_prepare(name, name_hash, text_content, image_urls, multiLang, lang='zh'):
    make_dir(name_hash)
    # download images / video clips
    img_num = 1
    for imgu in image_urls:
        if get_url_type(imgu) == 'video/mp4':
            r = requests.get(imgu)
            f = open(dir_photo + name_hash + "/" + str(img_num) + ".mp4", 'wb')
            for chunk in r.iter_content(chunk_size=255):
                if chunk:
                    f.write(chunk)
            f.close()
        else:
            im = Image.open(requests.get(imgu, stream=True).raw)
            im = im.convert("RGB")
            im.save(dir_photo + name_hash + "/" + str(img_num) + ".jpg")
        img_num += 1
    # save text
    txt_idx = 0
    for txt in text_content:
        text_file = open(dir_text + name_hash + "/" + str(txt_idx) + ".txt", "w")
        text_file.write(txt)
        text_file.close()
        txt_idx += 1
    print("text file made")
    # make mp3
    txt_idx = 0
    for txt in text_content:
        if multiLang == 3:
            hakkaTTS(dir_sound + name_hash + "/" + str(txt_idx) + ".mp3", txt, 0)
        elif multiLang == 4:
            hakkaTTS(dir_sound + name_hash + "/" + str(txt_idx) + ".mp3", txt, 1)
        else:
            if lang != 'zh' or multiLang == 1:
                if lang != 'zh':
                    tts = gTTS(txt)
                    tts.save(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
                else:
                    tts = gTTS(txt, lang='zh-tw')
                    tts.save(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
                # speed up
                ff = ffmpy.FFmpeg(inputs={dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3": None},
                                  outputs={dir_sound + name_hash + "/" + str(txt_idx) + ".mp3": ["-filter:a", "atempo=1.2"]})
                ff.run()
                os.remove(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
            else:
                print('use zhtts')
                tts = zhtts.TTS()
                tts.text2wav(txt, dir_sound + name_hash + "/" + str(txt_idx) + ".mp3")
        txt_idx += 1
    print("mp3 file made")
    # make title as image
    txt2image_title(name, dir_title + name_hash + ".png", lang)
def file_prepare_long(name, name_hash, text_content, image_urls, multiLang, lang='zh'):
    make_dir(name_hash)
    # download images / video clips
    img_num = 1
    for imgu in image_urls:
        if get_url_type(imgu) == 'video/mp4':
            r = requests.get(imgu)
            f = open(dir_photo + name_hash + "/" + str(img_num) + ".mp4", 'wb')
            for chunk in r.iter_content(chunk_size=255):
                if chunk:
                    f.write(chunk)
            f.close()
        else:
            im = Image.open(requests.get(imgu, stream=True).raw)
            im = im.convert("RGB")
            im.save(dir_photo + name_hash + "/" + str(img_num) + ".jpg")
        img_num += 1
    # make mp3
    text_parser = parser()
    txt_idx = 0
    for txt in text_content:
        # remove parser tokens (e.g. '{n}' image markers) from the text before TTS
        rep_list = text_parser.replace_list(txt)
        for reptxt in rep_list:
            txt = txt.replace(reptxt, '')
        if lang != 'zh' or multiLang == 1:
            if lang != 'zh':
                tts = gTTS(txt)
                tts.save(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
            else:
                tts = gTTS(txt, lang='zh-tw')
                tts.save(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
            # speed up
            ff = ffmpy.FFmpeg(inputs={dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3": None},
                              outputs={dir_sound + name_hash + "/" + str(txt_idx) + ".mp3": ["-filter:a", "atempo=1.2"]})
            ff.run()
            os.remove(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
        else:
            print('use zhtts')
            tts = zhtts.TTS()
            tts.text2wav(txt, dir_sound + name_hash + "/" + str(txt_idx) + ".mp3")
        txt_idx += 1
    print("mp3 file made")
    # make title as image
    txt2image_title(name, dir_title + name_hash + ".png", lang)
def txt2image(content, save_target, lang='zh'):
    unicode_text = trim_punctuation(content)
    font = ''
    if lang == 'zh':
        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=38)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=38)
    text_width, text_height = font.getsize(unicode_text)
    canvas = Image.new('RGBA', (700, 500), (255, 0, 0, 0))
    draw = ImageDraw.Draw(canvas)
    text = unicode_text
    draw.text((5, 5), text, (255, 255, 0), font)
    canvas.save(save_target, "PNG")
def txt2image_title(content, save_target, lang='zh'):
    unicode_text = trim_punctuation(content)
    font = ''
    if lang == 'zh':
        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=22)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=22)
    text_width, text_height = font.getsize(unicode_text)
    canvas = Image.new('RGBA', (510, 500), (255, 0, 0, 0))
    draw = ImageDraw.Draw(canvas)
    text = unicode_text
    draw.text((5, 5), text, (17, 41, 167), font)
    canvas.save(save_target, "PNG")
def call_anchor(fileName, avatar):
    conn = rpyc.classic.connect("192.168.1.111", 18812)
    ros = conn.modules.os
    rsys = conn.modules.sys
    fr = open(dir_sound + fileName + ".mp3", 'rb')  # voice
    # warning!!! the file may be replaced by another process
    fw = conn.builtins.open('/tmp/output.mp3', 'wb')
    while True:
        b = fr.read(1024)
        if b:
            fw.write(b)
        else:
            break
    fr.close()
    fw.close()
    val = random.randint(1000000, 9999999)
    ros.chdir('/home/jared/to_video')
    ros.system('./p' + str(avatar) + '.sh ' + str(val) + ' &')
    while True:
        print('waiting...')
        if ros.path.exists('/tmp/results/' + str(val)):
            break
        time.sleep(5)
        print('waiting...')
    fr = conn.builtins.open('/tmp/results/' + str(val) + '.mp4', 'rb')
    fw = open(dir_anchor + fileName + ".mp4", 'wb')
    while True:
        b = fr.read(1024)
        if b:
            fw.write(b)
        else:
            break
    fr.close()
    fw.close()
def syllable_count(word):
    word = word.lower()
    count = 0
    vowels = "aeiouy"
    if word[0] in vowels:
        count += 1
    for index in range(1, len(word)):
        if word[index] in vowels and word[index - 1] not in vowels:
            count += 1
    if word.endswith("e"):
        count -= 1
    if count == 0:
        count += 1
    return count
def split_sentence(in_str, maxLen):
    re.findall(r'[\u4e00-\u9fff]+', in_str)
    zh_idx = []
    eng_idx = []
    for i in range(len(in_str)):
        if in_str[i] > u'\u4e00' and in_str[i] < u'\u9fff':
            zh_idx.append(i)
        else:
            eng_idx.append(i)
    space_index = [m.start() for m in re.finditer(' ', in_str)]
    for idx in space_index:
        eng_idx.remove(idx)
    eng_range_list = []
    for k, g in groupby(enumerate(eng_idx), lambda ix: ix[0] - ix[1]):
        eng_range = list(map(itemgetter(1), g))
        eng_range_list.append(eng_range)
    total_syllable = 0
    for i in range(len(eng_range_list)):
        total_syllable += (syllable_count(in_str[eng_range_list[i][0]:eng_range_list[i][-1] + 1]) + 0.5)
    for i in range(len(zh_idx)):
        total_syllable += 1
    # walk the sentence as a mix of single Chinese characters and whole English words;
    # each English word is kept as one unit (the list of its character indices)
    zh_eng_idx_list = []
    i = 0
    while i < len(in_str):
        if in_str[i] == ' ':
            i += 1
        if i in zh_idx:
            zh_eng_idx_list.append(i)
            i += 1
        if i in eng_idx:
            for ls in eng_range_list:
                if i in ls:
                    zh_eng_idx_list.append(ls)
                    i = ls[-1] + 1
                    break
    zh_eng_dict_list = [{'content': '', 'time_ratio': 0}]
    idx = 0
    current_len = 0
    sen_idx = 0
    while idx < len(zh_eng_idx_list):
        str_from_idx = ''
        sylla_cnt = 1
        if type(zh_eng_idx_list[idx]) == type([]):
            str_from_idx = in_str[zh_eng_idx_list[idx][0]:zh_eng_idx_list[idx][-1] + 1] + ' '
            sylla_cnt = syllable_count(str_from_idx)
        else:
            str_from_idx = in_str[zh_eng_idx_list[idx]]
        if len(zh_eng_dict_list[sen_idx]['content']) + sylla_cnt >= maxLen:
            zh_eng_dict_list[sen_idx]['time_ratio'] = current_len / total_syllable
            zh_eng_dict_list.append({'content': '', 'time_ratio': 0})
            sen_idx += 1
            current_len = 0
        else:
            current_len += sylla_cnt
            zh_eng_dict_list[sen_idx]['content'] += str_from_idx
        idx += 1
    total_ratio = 0
    for obj in zh_eng_dict_list:
        total_ratio += obj['time_ratio']
    zh_eng_dict_list[-1]['time_ratio'] = 1 - total_ratio
    return zh_eng_dict_list
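# Illustrative note (assumed behaviour, read from the code above): split_sentence()
# chops a mixed Chinese/English sentence into chunks of at most roughly maxLen units
# and estimates how much of the spoken duration each chunk should get, e.g.
#
#   chunks = split_sentence('今天介紹 OpenShot 影片生成流程', 13)
#   # -> a list of dicts shaped like {'content': '...', 'time_ratio': 0.42},
#   #    with the time_ratio values summing to 1.0 so a caller can split an
#   #    audio clip's duration proportionally (see parse_script below).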
def parse_script(file_path, gt_list):
    with open(file_path, 'r', encoding="utf-8") as f:
        raw_lines = [line.strip() for line in f]
    lines = adjustSub_by_text_similarity(gt_list, raw_lines)
    text_parser = parser()
    # make dict
    dict_list = []
    for idx in range(len(lines)):
        script = {}
        rep_ls = text_parser.replace_list(lines[idx])
        line_content = lines[idx]
        for reptxt in rep_ls:
            line_content = line_content.replace(reptxt, '')
        if len(rep_ls) != 0:
            script['image_idx'] = int(rep_ls[0].replace('{', '').replace('}', ''))
        script['content'] = line_content
        time_raw = raw_lines[idx * 4 + 1].split(' --> ')
        start = time_raw[0].split(':')
        stop = time_raw[1].split(':')
        script['start'] = float(start[0]) * 3600 + float(start[1]) * 60 + float(start[2].replace(',', '.'))
        script['stop'] = float(stop[0]) * 3600 + float(stop[1]) * 60 + float(stop[2].replace(',', '.'))
        dict_list.append(script)
    # merge duplicated sentences
    skip_list = []
    script_not_dup_list = []
    for idx in range(len(dict_list)):
        if idx not in skip_list:
            dup_list = []
            found = 0
            for idx_inner in range(len(dict_list)):
                if dict_list[idx_inner]['content'] == dict_list[idx]['content'] and idx <= idx_inner:
                    dup_list.append(idx_inner)
                    skip_list.append(idx_inner)
                    found += 1
                if found != 0 and dict_list[idx_inner]['content'] != dict_list[idx]['content'] and idx <= idx_inner:
                    found = 0
                    break
            for dup_idx in dup_list:
                if dup_idx == min(dup_list):
                    dict_list[dup_idx]['type'] = 'lead_sentence'
                else:
                    dict_list[dup_idx]['type'] = 'duplicated'
            dict_list[dup_list[0]]['stop'] = dict_list[dup_list[-1]]['stop']
            if dict_list[idx]['type'] == 'lead_sentence':
                script_not_dup_list.append(dict_list[idx])
    # re-split the merged sentences into subtitle-sized chunks
    new_idx = 0
    splitted_dict = []
    for dic in script_not_dup_list:
        dic_idx = 0
        accumulated_duration = 0
        duration = dic['stop'] - dic['start']
        for sub_dic in split_sentence(dic['content'], 13):
            new_dic = {}
            new_dic['index'] = new_idx
            if 'image_idx' in dic:
                new_dic['image_obj'] = {'start': dic['start'], 'idx': dic['image_idx']}
            new_idx += 1
            ind_duration = duration * sub_dic['time_ratio']
            new_dic['start'] = dic['start'] + accumulated_duration
            accumulated_duration += ind_duration
            new_dic['content'] = sub_dic['content']
            new_dic['duration'] = ind_duration * 0.7
            splitted_dict.append(new_dic)
    return splitted_dict
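# Note on parse_script() (illustrative, matching the parsing logic above): file_path
# points at the SRT-style transcript that autosub writes (blocks of four lines:
# index, "HH:MM:SS,mmm --> HH:MM:SS,mmm", text, blank), and gt_list is the original
# script split by punctuation. Each returned entry looks roughly like
#   {'index': 0, 'start': 12.4, 'duration': 1.8, 'content': '...',
#    'image_obj': {'start': 12.4, 'idx': 3}}   # image_obj only when a {n} tag was present
# and is later used to place one subtitle PNG per entry on the timeline.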
def adjustSub_by_text_similarity(gts_in, gens_raw):
    # work on a copy so the caller's list is not modified
    gts = gts_in[:]
    text_parser = parser()
    for i in range(len(gts)):
        rep_ls = text_parser.replace_list(gts[i])
        for reptxt in rep_ls:
            gts[i] = gts[i].replace(reptxt, '')
    gens = []
    for idx in range(int((len(gens_raw) + 1) / 4)):
        gens.append(gens_raw[idx * 4 + 2])
    combine2 = [''.join([i, j]) for i, j in zip(gts, gts[1:])]
    combine3 = [''.join([i, j, k]) for i, j, k in zip(gts, gts[1:], gts[2:])]
    alls = gts  # + combine2 + combine3
    adjusted = [None] * len(gens)
    duplicated_list = []
    for idx in range(len(gens)):
        match_text = difflib.get_close_matches(gens[idx], alls, cutoff=0.1)
        if len(match_text) != 0:
            if match_text[0] not in duplicated_list:
                adjusted[idx] = match_text[0]
                duplicated_list.append(match_text[0])
            else:
                if match_text[0] == adjusted[idx - 1]:
                    adjusted[idx] = match_text[0]
                else:
                    found = 0
                    for mt in match_text:
                        if mt not in duplicated_list:
                            adjusted[idx] = mt
                            found += 1
                            break
                    if found == 0:
                        adjusted[idx] = ' '
        else:
            adjusted[idx] = ' '
    combine2_tag = [''.join([i, j]) for i, j in zip(gts_in, gts_in[1:])]
    combine3_tag = [''.join([i, j, k]) for i, j, k in zip(gts_in, gts_in[1:], gts_in[2:])]
    alls_tag = gts_in  # + combine2_tag + combine3_tag
    for idx in range(len(adjusted)):
        match_text = difflib.get_close_matches(adjusted[idx], alls_tag, cutoff=0.1)
        adjusted[idx] = match_text[0]
    return adjusted
def trim_punctuation(s):
    pat_block = u'[^\u4e00-\u9fff0-9a-zA-Z]+'
    pattern = u'([0-9]+{0}[0-9]+)|{0}'.format(pat_block)
    res = re.sub(pattern, lambda x: x.group(1) if x.group(1) else u" ", s)
    return res
def splitter(s):
    for sent in re.findall(u'[^!?,。\!\?]+[!? 。\!\?]?', s, flags=re.U):
        yield sent
def split_by_pun(s):
    res = list(splitter(s))
    return res
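# Illustrative usage (assumed, based on the regex above): split_by_pun() keeps the
# trailing punctuation with each clause, e.g.
#
#   split_by_pun('今天天氣很好。適合出門。')   # -> ['今天天氣很好。', '適合出門。']
#
# This is how the callers below turn text_content[0] into the gt_list that
# parse_script() aligns the auto-generated subtitles against.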
def generate_subtitle_image_from_dict(name_hash, sub_dict):
    for script in sub_dict:
        sv_path = dir_subtitle + name_hash + '/' + str(script['index']) + '.png'
        sub = script['content']
        txt2image(sub, sv_path)
def generate_subtitle_image(name_hash, text_content):
    img_list = [None] * len(text_content)
    for idx in range(len(text_content)):
        img_list[idx] = []
        senList = split_by_pun(text_content[idx])
        for inner_idx in range(len(senList)):
            sv_path = dir_subtitle + name_hash + '/' + str(idx) + str(inner_idx) + '.png'
            sub = senList[inner_idx]
            txt2image(sub, sv_path)
            # estimate how long this subtitle should stay on screen:
            # one unit per Chinese character plus the syllable count of each English word
            clean_content = trim_punctuation(sub)
            re.findall(r'[\u4e00-\u9fff]+', clean_content)
            zh_idx = []
            eng_idx = []
            for i in range(len(clean_content)):
                if clean_content[i] > u'\u4e00' and clean_content[i] < u'\u9fff':
                    zh_idx.append(i)
                else:
                    eng_idx.append(i)
            space_index = [m.start() for m in re.finditer(' ', clean_content)]
            for s_idx in space_index:
                eng_idx.remove(s_idx)
            eng_range_list = []
            for k, g in groupby(enumerate(eng_idx), lambda ix: ix[0] - ix[1]):
                eng_range = list(map(itemgetter(1), g))
                eng_range_list.append(eng_range)
            total_syllable = 0
            for i in range(len(eng_range_list)):
                total_syllable += (syllable_count(clean_content[eng_range_list[i][0]:eng_range_list[i][-1] + 1]) + 0.5)
            for i in range(len(zh_idx)):
                total_syllable += 1
            img_list[idx] += [{"count": total_syllable, "path": sv_path}]
    return img_list
def generate_subtitle_image_ENG(name_hash, text_content):
    img_list = [None] * len(text_content)
    for idx in range(len(text_content)):
        sv_path = dir_subtitle + name_hash + '/' + str(idx) + '.png'
        sub = text_content[idx]
        txt2image(sub, sv_path, lang='eng')
        img_list[idx] = sv_path
    return img_list
def video_writer_init(path):
    w = openshot.FFmpegWriter(path)
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    return w
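# Usage note (illustrative): the writer returned here is always driven the same way
# by the builders below -- open it, pull frames from an open Timeline t, then close both.
# 'demo' and total_seconds are placeholders, not names used elsewhere in this file.
#
#   w = video_writer_init(tmp_video_dir + 'demo.mp4')
#   w.Open()
#   for n in range(int(t.info.fps) * int(total_seconds)):
#       w.WriteFrame(t.GetFrame(n))
#   t.Close()
#   w.Close()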
def video_gen(name_hash, name, text_content, image_urls, multiLang, avatar):
    file_prepare_long(name, name_hash, text_content, image_urls, multiLang)
    for fname in range(len(text_content)):
        call_anchor(name_hash + "/" + str(fname), avatar)
    print('called............................................')
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 1, 320)
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    LOGO_OP = openshot.FFmpegReader(dir_video + "LOGO_OP_4.mp4")
    LOGO_OP.Open()  # Open the reader
    head_duration = LOGO_OP.info.duration
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP, layer=4, position=0, end=head_duration,
                                    location_y=-0.03, scale_x=0.8, scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    bg_head = openshot.FFmpegReader(dir_video + "complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head, layer=2, position=0, end=LOGO_OP.info.duration, ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += head_duration
    bg_head.Close()
    LOGO_OP.Close()
    anchor = openshot.FFmpegReader(dir_anchor + name_hash + "/0.mp4")
    anchor.Open()
    #anchor_clip = video_photo_clip(vid=anchor,layer=4,scale_x=0.65,scale_y=0.65,
    #    location_x=0.35,location_y=0.25,position=main_timer, end=anchor.info.duration,ck=ck_anchor,audio=False)
    #t.AddClip(anchor_clip)
    speech = openshot.FFmpegReader(dir_sound + name_hash + "/0.mp3")
    speech.Open()
    speech_clip = openshot.Clip(speech)
    speech_clip.Position(main_timer)
    speech_clip.End(anchor.info.duration)
    t.AddClip(speech_clip)
    main_timer += anchor.info.duration
    anchor.Close()
    speech.Close()
    LOGO_ED = openshot.FFmpegReader(dir_video + "LOGO_ED.avi")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED, layer=4, position=main_timer, end=LOGO_ED.info.duration,
                                    location_x=0.005, location_y=-0.031, scale_x=0.8, scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    main_timer += LOGO_ED.info.duration
    LOGO_ED.Close()
    bg = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg.Open()
    bg_times = math.floor(main_timer / bg.info.duration)
    left_time = (main_timer) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx], layer=2, position=bg_timer, end=bg_list[idx].info.duration, ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left, layer=2, position=bg_timer, end=left_time, ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    title = openshot.QtImageReader(dir_title + name_hash + ".png")
    title.Open()  # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4, location_x=-0.047, location_y=0.801, position=0, end=head_duration + main_timer)
    t.AddClip(title_clip)
    w = video_writer_init(tmp_video_dir + name_hash + "raw.mp4")
    w.Open()
    frames = int(t.info.fps) * int(main_timer)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    print(name + "RAW DONE : www.choozmo.com:8168/" + tmp_video_dir + name_hash + "raw.mp4")
    # start adding subtitles
    Ctr_Autosub.init()
    Ctr_Autosub.generate_subtitles(tmp_video_dir + name_hash + "raw.mp4", 'zh', listener_progress,
                                   output=tmp_video_dir + name_hash + "script.txt",
                                   concurrency=DEFAULT_CONCURRENCY,
                                   subtitle_file_format=DEFAULT_SUBTITLE_FORMAT)
    sub_dict = parse_script(tmp_video_dir + name_hash + "script.txt", split_by_pun(text_content[0]))
    for subd in sub_dict:
        print(subd)
    generate_subtitle_image_from_dict(name_hash, sub_dict)
    #sv_path = dir_subtitle + name_hash + '/' + str(script['index'])+'.png'
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    raw = openshot.FFmpegReader(tmp_video_dir + name_hash + "raw.mp4")
    raw.Open()
    raw_clip = video_photo_clip(vid=raw, layer=2, position=0, end=raw.info.duration)
    t.AddClip(raw_clip)
    sub_img_list = [None] * len(sub_dict)
    sub_clip_list = [None] * len(sub_dict)
    for sub_obj in sub_dict:
        idx = int(sub_obj['index'])
        sub_img_list[idx] = openshot.QtImageReader(dir_subtitle + name_hash + '/' + str(idx) + '.png')
        sub_img_list[idx].Open()
        #if sub_obj['duration']>3:
        #    print('warning')
        #print('start:',sub_obj['start'],', duration :', sub_obj['duration'],' content',sub_obj['content'],'idx:',sub_obj['index'])
        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6, location_x=0.069, location_y=0.89,
                                              position=sub_obj['start'], end=math.ceil(sub_obj['duration']))
        t.AddClip(sub_clip_list[idx])
        sub_img_list[idx].Close()
    tp = parser()
    img_dict_ls = tp.image_clip_info(sub_dict)
    img_clip_list = [None] * len(listdir(dir_photo + name_hash))
    img_list = [None] * len(img_clip_list)
    img_file_ls = listdir(dir_photo + name_hash)
    for img_idx in range(len(img_file_ls)):
        img_list[img_idx] = openshot.FFmpegReader(dir_photo + name_hash + '/' + img_file_ls[img_idx])
        img_list[img_idx].Open()
        img_clip_list[img_idx] = video_photo_clip(vid=img_list[img_idx], layer=3,
                                                  scale_x=0.81, scale_y=0.68, location_y=-0.03,
                                                  position=img_dict_ls[img_idx]['start'], end=img_dict_ls[img_idx]['duration'], audio=False)
        t.AddClip(img_clip_list[img_idx])
        img_list[img_idx].Close()
    anchor = openshot.FFmpegReader(dir_anchor + name_hash + "/0.mp4")
    anchor.Open()
    anchor_clip = video_photo_clip(vid=anchor, layer=4, scale_x=0.65, scale_y=0.65,
                                   location_x=0.35, location_y=0.25, position=head_duration, end=anchor.info.duration, ck=ck_anchor, audio=False)
    t.AddClip(anchor_clip)
    w = video_writer_init(tmp_video_dir + name_hash + ".mp4")
    w.Open()
    frames = int(t.info.fps) * int(main_timer)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    os.remove(tmp_video_dir + name_hash + "raw.mp4")
    os.remove(tmp_video_dir + name_hash + "script.txt")
    print(name + "ALL DONE : www.choozmo.com:8168/" + video_sub_folder + name_hash + "raw.mp4")
def anchor_video_v2(name_hash, name, text_content, image_urls, multiLang, avatar, freeTrial):
    print(name)
    print(text_content)
    print(os.getcwd())
    print('sub image made')
    print(multiLang)
    file_prepare(name, name_hash, text_content, image_urls, multiLang)
    sub_list = generate_subtitle_image(name_hash, text_content)
    for fname in range(len(text_content)):
        call_anchor(name_hash + "/" + str(fname), avatar)
        print('step finish')
    print('called............................................')
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 1, 320)
    duration = 0
    # average layer level is 3
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    LOGO_OP = openshot.FFmpegReader(dir_video + "LOGO_OP_4.mp4")
    LOGO_OP.Open()  # Open the reader
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP, layer=4, position=0, end=LOGO_OP.info.duration,
                                    location_y=-0.03, scale_x=0.8, scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    bg_head = openshot.FFmpegReader(dir_video + "complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head, layer=2, position=0, end=LOGO_OP.info.duration, ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += LOGO_OP.info.duration
    head_duration = LOGO_OP.info.duration
    bg_head.Close()
    LOGO_OP.Close()
    clip_duration = 0
    photo_clip_list = [None] * len(text_content)
    img_list = [None] * len(text_content)
    anchor_clip_list = [None] * len(text_content)
    anchor_list = [None] * len(text_content)
    audio_clip_list = [None] * len(text_content)
    audio_list = [None] * len(text_content)
    sub_clip_list = [None] * len(text_content)
    sub_img_list = [None] * len(text_content)
    idx = 0
    for p in listdir(dir_photo + name_hash):
        anchor_list[idx] = openshot.FFmpegReader(dir_anchor + name_hash + "/" + str(idx) + ".mp4")
        clip_duration = anchor_list[idx].info.duration
        anchor_list[idx].Open()
        anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx], layer=4, scale_x=0.65, scale_y=0.65,
                                                 location_x=0.35, location_y=0.25, position=main_timer, end=clip_duration, ck=ck_anchor, audio=False)
        print('avatar is ', avatar)
        t.AddClip(anchor_clip_list[idx])
        img_list[idx] = openshot.FFmpegReader(dir_photo + name_hash + '/' + p)
        img_list[idx].Open()
        photo_clip_list[idx] = video_photo_clip(vid=img_list[idx], layer=3,
                                                scale_x=0.8, scale_y=0.6825, location_y=-0.03, position=main_timer, end=clip_duration, audio=False)
        t.AddClip(photo_clip_list[idx])
        img_list[idx].Close()
        audio_list[idx] = openshot.FFmpegReader(dir_sound + name_hash + "/" + str(idx) + ".mp3")
        audio_list[idx].Open()
        audio_clip_list[idx] = openshot.Clip(audio_list[idx])
        audio_clip_list[idx].Position(main_timer)
        audio_clip_list[idx].End(clip_duration)
        t.AddClip(audio_clip_list[idx])
        img_list[idx].Close()
        anchor_list[idx].Close()
        audio_list[idx].Close()
        sub_img_list[idx] = [None] * len(sub_list[idx])
        sub_clip_list[idx] = [None] * len(sub_list[idx])
        sub_timer = 0
        for sub_idx in range(len(sub_list[idx])):
            sub_img_list[idx][sub_idx] = openshot.QtImageReader(sub_list[idx][sub_idx]['path'])
            sub_img_list[idx][sub_idx].Open()
            sub_duration = 0.205 * sub_list[idx][sub_idx]['count']
            sub_clip_list[idx][sub_idx] = video_photo_clip(vid=sub_img_list[idx][sub_idx], layer=6, location_x=0.069, location_y=0.89,
                                                           position=main_timer + sub_timer, end=sub_duration)
            t.AddClip(sub_clip_list[idx][sub_idx])
            sub_img_list[idx][sub_idx].Close()
            sub_timer += sub_duration
            print(sub_list[idx][sub_idx]['path'])
        main_timer += clip_duration
        idx += 1
    LOGO_ED = openshot.FFmpegReader(dir_video + "LOGO_ED.avi")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED, layer=4, position=main_timer, end=LOGO_ED.info.duration + 2,
                                    location_x=0.005, location_y=-0.031,
                                    scale_x=0.8, scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    ED_duration = LOGO_ED.info.duration
    LOGO_ED.Close()
    bg = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg.Open()
    bg_times = math.floor((main_timer + ED_duration) / bg.info.duration)
    left_time = (main_timer + ED_duration) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx], layer=2, position=bg_timer,
                                             end=bg_list[idx].info.duration, ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left, layer=2, position=bg_timer, end=left_time, ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    title = openshot.QtImageReader(dir_title + name_hash + ".png")
    title.Open()  # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4, location_x=-0.047, location_y=0.801, position=0, end=head_duration + main_timer)
    t.AddClip(title_clip)
    if freeTrial == 1:
        print("THIS IS TRIAL")
        wm = openshot.QtImageReader(dir_video + "freeTrialWatermark.png")
        wm.Open()
        wm_clip = video_photo_clip(wm, layer=6, position=0, end=int(head_duration + main_timer + ED_duration))
        #t.AddClip(wm_clip)
    else:
        print("THIS IS NOT TRIAL")
        print(freeTrial)
    #### start building
    w = openshot.FFmpegWriter(tmp_video_dir + name_hash + ".mp4")
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    w.Open()
    # may change duration into t.info.duration
    frames = int(t.info.fps) * int(head_duration + main_timer + ED_duration)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    #notify_group(name+"的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
    t.Close()
    w.Close()
    print("video at : www.choozmo.com:8168/" + video_sub_folder + name_hash + ".mp4")
def anchor_video_eng(name_hash, name, text_content, image_urls, sub_titles, avatar, freeTrial):
    file_prepare(name, name_hash, text_content, image_urls, 1, 'eng')
    sub_list = generate_subtitle_image_ENG(name_hash, sub_titles)
    for fname in range(len(text_content)):
        call_anchor(name_hash + "/" + str(fname), avatar)
        print('step finish')
    print('called............................................')
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 1, 320)
    duration = 0
    # average layer level is 3
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    # add logo
    LOGO_OP = openshot.FFmpegReader(dir_video + "LOGO_OP_4.mp4")
    LOGO_OP.Open()  # Open the reader
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP, layer=4, position=0, end=LOGO_OP.info.duration,
                                    location_y=-0.03, scale_x=0.8, scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    # add background video (head is different)
    bg_head = openshot.FFmpegReader(dir_video + "complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head, layer=2, position=0, end=LOGO_OP.info.duration, ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += LOGO_OP.info.duration
    head_duration = LOGO_OP.info.duration
    bg_head.Close()
    LOGO_OP.Close()
    # prepare empty lists
    clip_duration = 0
    photo_clip_list = [None] * len(text_content)
    img_list = [None] * len(text_content)
    anchor_clip_list = [None] * len(text_content)
    anchor_list = [None] * len(text_content)
    audio_clip_list = [None] * len(text_content)
    audio_list = [None] * len(text_content)
    sub_clip_list = [None] * len(text_content)
    # openshot image holder
    sub_img_list = [None] * len(text_content)
    idx = 0
    for p in listdir(dir_photo + name_hash):
        anchor_list[idx] = openshot.FFmpegReader(dir_anchor + name_hash + "/" + str(idx) + ".mp4")
        clip_duration = anchor_list[idx].info.duration
        anchor_list[idx].Open()
        anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx], layer=4, scale_x=0.65, scale_y=0.65,
                                                 location_x=0.35, location_y=0.25, position=main_timer, end=clip_duration, ck=ck_anchor, audio=False)
        t.AddClip(anchor_clip_list[idx])
        # insert image
        img_list[idx] = openshot.FFmpegReader(dir_photo + name_hash + '/' + p)
        img_list[idx].Open()
        photo_clip_list[idx] = video_photo_clip(vid=img_list[idx], layer=3,
                                                scale_x=0.81, scale_y=0.68, location_y=-0.03, position=main_timer, end=clip_duration, audio=False)
        t.AddClip(photo_clip_list[idx])
        img_list[idx].Close()
        # insert audio (speech)
        audio_list[idx] = openshot.FFmpegReader(dir_sound + name_hash + "/" + str(idx) + ".mp3")
        audio_list[idx].Open()
        audio_clip_list[idx] = openshot.Clip(audio_list[idx])
        audio_clip_list[idx].Position(main_timer)
        audio_clip_list[idx].End(clip_duration)
        t.AddClip(audio_clip_list[idx])
        # insert subtitle
        sub_img_list[idx] = openshot.QtImageReader(sub_list[idx])
        sub_img_list[idx].Open()
        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6, location_x=0.069, location_y=0.89, position=main_timer, end=clip_duration)
        t.AddClip(sub_clip_list[idx])
        img_list[idx].Close()
        anchor_list[idx].Close()
        audio_list[idx].Close()
        sub_img_list[idx].Close()
        main_timer += clip_duration
        idx += 1
    LOGO_ED = openshot.FFmpegReader(dir_video + "ED_ENG.mp4")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED, layer=4, position=main_timer, end=LOGO_ED.info.duration + 2,
                                    location_x=0.005, location_y=-0.031,
                                    scale_x=0.8, scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    ED_duration = LOGO_ED.info.duration
    LOGO_ED.Close()
    bg = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg.Open()
    bg_times = math.floor((main_timer + ED_duration) / bg.info.duration)
    left_time = (main_timer + ED_duration) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx], layer=2, position=bg_timer,
                                             end=bg_list[idx].info.duration, ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left, layer=2, position=bg_timer, end=left_time, ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    title = openshot.QtImageReader(dir_title + name_hash + ".png")
    title.Open()  # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4, location_x=-0.047, location_y=0.801, position=0, end=head_duration + main_timer)
    t.AddClip(title_clip)
    if freeTrial == 1:
        wm = openshot.QtImageReader(dir_video + "freeTrialWatermark.png")
        wm.Open()
        wm_clip = video_photo_clip(wm, layer=6, position=0, end=int(head_duration + main_timer + ED_duration))
        #t.AddClip(wm_clip)
        print("THIS IS TRIAL")
    else:
        print("THIS IS NOT TRIAL")
        print(freeTrial)
    #### start building
    w = openshot.FFmpegWriter(tmp_video_dir + name_hash + ".mp4")
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    w.Open()
    # may change duration into t.info.duration
    frames = int(t.info.fps) * int(head_duration + main_timer + ED_duration)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    #notify_group(name+"(ENG)的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
    t.Close()
    w.Close()
    print("video at : www.choozmo.com:8168/" + video_sub_folder + name_hash + ".mp4")
# local text-to-speech helper (pyttsx3 is already imported at the top of the file)
def make_speech(text):
    engine = pyttsx3.init()
    #voices = engine.getProperty('voices')
    engine.setProperty('voice', 'Mandarin')
    engine.save_to_file(text, '/app/speech.mp3')
    engine.runAndWait()
class video_service(rpyc.Service):
    def exposed_call_video(self, name_hash, name, text_content, image_urls, multiLang, avatar, freeTrial):
        print('ML:' + str(multiLang))
        anchor_video_v2(name_hash, name, text_content, image_urls, multiLang, avatar, freeTrial)
    def exposed_call_video_eng(self, name_hash, name, text_content, image_urls, sub_titles, avatar, freeTrial):
        anchor_video_eng(name_hash, name, text_content, image_urls, sub_titles, avatar, freeTrial)
    def exposed_call_video_gen(self, name_hash, name, text_content, image_urls, multiLang, avatar):
        print('ML:' + str(multiLang))  # this is the long-video version
        video_gen(name_hash, name, text_content, image_urls, multiLang, avatar)
    def exposed_make_speech(self, text):
        make_speech(text)
from rpyc.utils.server import ThreadedServer
t = ThreadedServer(video_service, port=8858)
print('service started')
t.start()
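# Illustrative client sketch (not part of this module; the host address, hash and
# text values are hypothetical placeholders). A caller on another machine would
# connect to this rpyc service and trigger a render roughly like so:
#
#   import rpyc
#   conn = rpyc.connect('192.168.1.106', 8858)    # address of the box running this script
#   conn.root.call_video(
#       'abc123def',                               # name_hash: working-folder name
#       '測試影片',                                  # name: title rendered by txt2image_title()
#       ['第一段文字', '第二段文字'],                 # text_content: one entry per scene
#       ['https://example.com/1.jpg',
#        'https://example.com/2.mp4'],             # image_urls: images or mp4 clips
#       0,                                         # multiLang: 1 forces gTTS, 3/4 Hakka TTS, else zhtts
#       1,                                         # avatar: selects ./p<avatar>.sh on the render host
#       0)                                         # freeTrial: 1 marks the render as a trial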