# openshot_video_generator.py

from os import listdir
from os.path import isfile, isdir, join
import openshot
import threading
import zhtts
import os
import urllib
from typing import List
import requests
from pydantic import BaseModel
from bs4 import BeautifulSoup
from PIL import Image, ImageDraw, ImageFont
import pyttsx3
import rpyc
import random
import re
import time
import math
import dataset
from datetime import datetime
from gtts import gTTS
import ffmpy
from difflib import SequenceMatcher
import difflib
from autosub import DEFAULT_CONCURRENCY
from autosub import DEFAULT_SUBTITLE_FORMAT
from pytranscriber.control.ctr_main import Ctr_Main
from pytranscriber.control.ctr_autosub import Ctr_Autosub
import multiprocessing
from itertools import groupby
from operator import itemgetter
from openUtil.parser import parser
import pandas as pd
import numpy as np
import jieba
import jieba.posseg as pseg
import urllib.request
import librosa
from pydub import AudioSegment
from pydub.silence import split_on_silence
import itertools
from hakkaUtil import *

dir_sound = 'mp3_track/'
dir_photo = 'photo/'
dir_text = 'text_file/'
dir_video = 'video_material/'
dir_title = 'title/'
dir_subtitle = 'subtitle/'
dir_anchor = 'anchor_raw/'
tmp_video_dir = 'tmp_video/'
video_sub_folder = 'ai_anchor_video/'
dir_list = [dir_sound, dir_photo, dir_text, dir_video, dir_title, dir_subtitle, dir_anchor, tmp_video_dir]

def notify_group(msg):
    glist = ['7vilzohcyQMPLfAMRloUawiTV4vtusZhxv8Czo7AJX8', 'WekCRfnAirSiSxALiD6gcm0B56EejsoK89zFbIaiZQD', '1dbtJHbWVbrooXmQqc4r8OyRWDryjD4TMJ6DiDsdgsX', 'HOB1kVNgIb81tTB4Ort1BfhVp9GFo6NlToMQg88vEhh']
    for gid in glist:
        headers = {
            "Authorization": "Bearer " + gid,
            "Content-Type": "application/x-www-form-urlencoded"
        }
        params = {"message": msg}
        r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params)

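# Hedged usage note (added for illustration, not from the original source): notify_group()
# pushes the same message to every LINE Notify token in glist through the public
# https://notify-api.line.me/api/notify endpoint, so a render step can announce itself with
# a single call such as:
#   notify_group('render finished')   # illustrative message text
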
def cKey(r, g, b, fuzz):
    col = openshot.Color()
    col.red = openshot.Keyframe(r)
    col.green = openshot.Keyframe(g)
    col.blue = openshot.Keyframe(b)
    return openshot.ChromaKey(col, openshot.Keyframe(fuzz))

def video_photo_clip(vid=None, layer=None, position=None, end=None,
                     scale_x=1, scale_y=1, location_x=0, location_y=0, ck=None, audio=True):
    clip = openshot.Clip(vid)
    clip.Layer(layer)
    clip.Position(position)
    clip.End(end)
    clip.scale_x = openshot.Keyframe(scale_x)
    clip.scale_y = openshot.Keyframe(scale_y)
    clip.location_x = openshot.Keyframe(location_x)
    clip.location_y = openshot.Keyframe(location_y)
    if ck is not None:
        clip.AddEffect(ck)
    if audio == True:
        clip.has_audio = openshot.Keyframe(1)
    else:
        clip.has_audio = openshot.Keyframe(0)
    return clip

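# Hedged usage sketch (added for illustration, not part of the original pipeline):
# cKey() builds a chroma-key effect and video_photo_clip() wraps an openshot reader into a
# positioned, scaled Clip on a Timeline. The file name below is made up.
#   green_ck = cKey(0, 254, 0, 270)
#   timeline = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
#   timeline.Open()
#   reader = openshot.FFmpegReader("some_clip.mp4")
#   reader.Open()
#   timeline.AddClip(video_photo_clip(vid=reader, layer=2, position=0, end=reader.info.duration, ck=green_ck))
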
def listener_progress(string, percent):
    # progress callback required by Ctr_Autosub; the output is intentionally ignored
    pass

def myunichchar(unicode_char):
    mb_string = unicode_char.encode('big5')
    try:
        # Python 2 path: indexing a byte string returns 1-character strings
        unicode_char = unichr(ord(mb_string[0]) << 8 | ord(mb_string[1]))
    except (NameError, TypeError):
        # Python 3 path: bytes indexing already yields ints and unichr() no longer exists
        unicode_char = chr(mb_string[0] << 8 | mb_string[1])
    return unicode_char

def get_url_type(url):
    print('---------------------------------------------')
    print(url)
    req = urllib.request.Request(url, method='HEAD', headers={'User-Agent': 'Mozilla/5.0'})
    r = urllib.request.urlopen(req)
    contentType = r.getheader('Content-Type')
    print(contentType)
    print('-------------------------------------------------')
    return contentType

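# Example (illustrative only): get_url_type('https://example.com/a.jpg') issues a HEAD
# request and returns the Content-Type response header (e.g. 'image/jpeg'), which
# file_prepare() below uses to tell mp4 clips apart from still images.
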
def make_dir(name_hash):
    for direct in dir_list:
        if not os.path.isdir(direct):
            os.mkdir(direct)
    try:
        os.mkdir(dir_photo+name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_photo+name_hash, " already exists")
    try:
        os.mkdir(dir_text+name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_text+name_hash, " already exists")
    try:
        os.mkdir(dir_sound+name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_sound+name_hash, " already exists")
    try:
        os.mkdir(dir_anchor+name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_anchor+name_hash, " already exists")
    try:
        os.mkdir(dir_subtitle+name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_subtitle+name_hash, " already exists")

def hakkaTTS(mp3_path, ch_sentence, gender):
    download = False  # set to True only when the Hakka reference audio still needs to be downloaded
    hakka_100 = import_hakka_100()
    word_data, multi_sound = import_data()
    if download:
        download_mp3(word_data, multi_sound)
        download_hakka_100(hakka_100)
    ch_word_list = list(itertools.chain(*word_data['華語詞義集'].tolist())) + hakka_100.chinese_clean.tolist()
    import_jieba_userdict(ch_word_list=ch_word_list, userDict_path='userDict.txt')
    gen_hakka_tts(word_data, multi_sound, hakka_100, ch_sentence, gender, mp3_path)

def file_prepare(name, name_hash, text_content, image_urls, multiLang, lang='zh'):
    make_dir(name_hash)
    img_num = 1
    for imgu in image_urls:
        if get_url_type(imgu) == 'video/mp4':
            r = requests.get(imgu)
            f = open(dir_photo+name_hash+"/"+str(img_num)+".mp4", 'wb')
            for chunk in r.iter_content(chunk_size=255):
                if chunk:
                    f.write(chunk)
            f.close()
        else:
            im = Image.open(requests.get(imgu, stream=True).raw)
            im = im.convert("RGB")
            im.save(dir_photo+name_hash+"/"+str(img_num)+".jpg")
        img_num += 1
    #save text
    txt_idx = 0
    for txt in text_content:
        text_file = open(dir_text+name_hash+"/"+str(txt_idx)+".txt", "w")
        text_file.write(txt)
        text_file.close()
        txt_idx += 1
    print("text file made")
    #make mp3
    txt_idx = 0
    for txt in text_content:
        if multiLang == 3:
            hakkaTTS(dir_sound+name_hash+"/"+str(txt_idx)+".mp3", txt, 0)
        elif multiLang == 4:
            hakkaTTS(dir_sound+name_hash+"/"+str(txt_idx)+".mp3", txt, 1)
        if lang != 'zh' or multiLang == 1:
            if lang != 'zh':
                tts = gTTS(txt)
                tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
            else:
                tts = gTTS(txt, lang='zh-tw')
                tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
            #speed up
            ff = ffmpy.FFmpeg(inputs={dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3": None}
                , outputs={dir_sound+name_hash+"/"+str(txt_idx)+".mp3": ["-filter:a", "atempo=1.2"]})
            ff.run()
            os.remove(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
        else:
            print('use zhtts')
            tts = zhtts.TTS()
            tts.text2wav(txt, dir_sound+name_hash+"/"+str(txt_idx)+".mp3")
        txt_idx += 1
    print("mp3 file made")
    #make title as image
    txt2image_title(name, dir_title+name_hash+".png", lang)

def file_prepare_long(name, name_hash, text_content, image_urls, multiLang, lang='zh'):
    make_dir(name_hash)
    img_num = 1
    for imgu in image_urls:
        if get_url_type(imgu) == 'video/mp4':
            r = requests.get(imgu)
            f = open(dir_photo+name_hash+"/"+str(img_num)+".mp4", 'wb')
            for chunk in r.iter_content(chunk_size=255):
                if chunk:
                    f.write(chunk)
            f.close()
        else:
            im = Image.open(requests.get(imgu, stream=True).raw)
            im = im.convert("RGB")
            im.save(dir_photo+name_hash+"/"+str(img_num)+".jpg")
        img_num += 1
    #make mp3
    text_parser = parser()
    txt_idx = 0
    for txt in text_content:
        rep_list = text_parser.replace_list(txt)
        for reptxt in rep_list:
            txt = txt.replace(reptxt, '')
        if lang != 'zh' or multiLang == 1:
            if lang != 'zh':
                tts = gTTS(txt)
                tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
            else:
                tts = gTTS(txt, lang='zh-tw')
                tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
            #speed up
            ff = ffmpy.FFmpeg(inputs={dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3": None}
                , outputs={dir_sound+name_hash+"/"+str(txt_idx)+".mp3": ["-filter:a", "atempo=1.2"]})
            ff.run()
            os.remove(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
        else:
            print('use zhtts')
            tts = zhtts.TTS()
            tts.text2wav(txt, dir_sound+name_hash+"/"+str(txt_idx)+".mp3")
        txt_idx += 1
    print("mp3 file made")
    #make title as image
    txt2image_title(name, dir_title+name_hash+".png", lang)

def txt2image(content, save_target, lang='zh'):
    unicode_text = trim_punctuation(content)
    font = ''
    if lang == 'zh':
        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=38)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=38)
    text_width, text_height = font.getsize(unicode_text)
    canvas = Image.new('RGBA', (700, 500), (255, 0, 0, 0))
    draw = ImageDraw.Draw(canvas)
    text = unicode_text
    draw.text((5, 5), text, (255, 255, 0), font)
    canvas.save(save_target, "PNG")

def txt2image_title(content, save_target, lang='zh'):
    unicode_text = trim_punctuation(content)
    font = ''
    if lang == 'zh':
        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=22)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=22)
    text_width, text_height = font.getsize(unicode_text)
    canvas = Image.new('RGBA', (510, 500), (255, 0, 0, 0))
    draw = ImageDraw.Draw(canvas)
    text = unicode_text
    draw.text((5, 5), text, (17, 41, 167), font)
    canvas.save(save_target, "PNG")

def call_anchor(fileName, avatar):
    conn = rpyc.classic.connect("192.168.1.111", 18812)
    ros = conn.modules.os
    rsys = conn.modules.sys
    fr = open(dir_sound+fileName+".mp3", 'rb')  # voice
    #warning!!! file may be replaced by another process
    fw = conn.builtins.open('/tmp/output.mp3', 'wb')
    while True:
        b = fr.read(1024)
        if b:
            fw.write(b)
        else:
            break
    fr.close()
    fw.close()
    val = random.randint(1000000, 9999999)
    ros.chdir('/home/jared/to_video')
    ros.system('./p'+str(avatar)+'.sh '+str(val)+' &')
    while True:
        print('waiting...')
        if ros.path.exists('/tmp/results/'+str(val)):
            break
        time.sleep(5)
        print('waiting...')
    fr = conn.builtins.open('/tmp/results/'+str(val)+'.mp4', 'rb')
    fw = open(dir_anchor+fileName+".mp4", 'wb')
    while True:
        b = fr.read(1024)
        if b:
            fw.write(b)
        else:
            break
    fr.close()
    fw.close()

def syllable_count(word):
    word = word.lower()
    count = 0
    vowels = "aeiouy"
    if word[0] in vowels:
        count += 1
    for index in range(1, len(word)):
        if word[index] in vowels and word[index - 1] not in vowels:
            count += 1
    if word.endswith("e"):
        count -= 1
    if count == 0:
        count += 1
    return count

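# Example (illustrative only): syllable_count("anchor") returns 2 -- the leading "a"
# counts once, the "o" after "h" counts once, and the word does not end in "e".
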
def split_sentence(in_str, maxLen):
    re.findall(r'[\u4e00-\u9fff]+', in_str)
    zh_idx = []
    eng_idx = []
    for i in range(len(in_str)):
        if in_str[i] > u'\u4e00' and in_str[i] < u'\u9fff':
            zh_idx.append(i)
        else:
            eng_idx.append(i)
    space_index = [m.start() for m in re.finditer(' ', in_str)]
    for idx in space_index:
        eng_idx.remove(idx)
    eng_range_list = []
    for k, g in groupby(enumerate(eng_idx), lambda ix: ix[0] - ix[1]):
        eng_range = list(map(itemgetter(1), g))
        eng_range_list.append(eng_range)
    total_syllable = 0
    for i in range(len(eng_range_list)):
        total_syllable += (syllable_count(in_str[eng_range_list[i][0]:eng_range_list[i][-1]+1]) + 0.5)
    for i in range(len(zh_idx)):
        total_syllable += 1
    #final chchchchchc[en][en][en]
    #[en] is a vocabulary dict with occurrence of image
    zh_eng_idx_list = []
    i = 0
    while i < len(in_str):
        if in_str[i] == ' ':
            i += 1
        if i in zh_idx:
            zh_eng_idx_list.append(i)
            i += 1
        if i in eng_idx:
            for ls in eng_range_list:
                if i in ls:
                    zh_eng_idx_list.append(ls)
                    i = ls[-1]+1
                    break
    zh_eng_dict_list = [{'content': '', 'time_ratio': 0}]
    idx = 0
    current_len = 0
    sen_idx = 0
    while idx < len(zh_eng_idx_list):
        str_from_idx = ''
        sylla_cnt = 1
        if type(zh_eng_idx_list[idx]) == type([]):
            str_from_idx = in_str[zh_eng_idx_list[idx][0]:zh_eng_idx_list[idx][-1]+1]+' '
            sylla_cnt = syllable_count(str_from_idx)
        else:
            str_from_idx = in_str[zh_eng_idx_list[idx]]
        if len(zh_eng_dict_list[sen_idx]['content'])+sylla_cnt >= maxLen:
            zh_eng_dict_list[sen_idx]['time_ratio'] = current_len/total_syllable
            zh_eng_dict_list.append({'content': '', 'time_ratio': 0})
            sen_idx += 1
            current_len = 0
        else:
            current_len += sylla_cnt
        zh_eng_dict_list[sen_idx]['content'] += str_from_idx
        idx += 1
    total_ratio = 0
    for obj in zh_eng_dict_list:
        total_ratio += obj['time_ratio']
    zh_eng_dict_list[-1]['time_ratio'] = 1 - total_ratio
    return zh_eng_dict_list

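# Hedged note on split_sentence() (added for illustration): it returns a list of dicts of
# the form {'content': <chunk of roughly maxLen mixed zh/en units>, 'time_ratio': <share of
# total syllables>}, and the time_ratio values sum to 1 by construction, so callers can split
# a known audio duration proportionally across the chunks. Illustrative call, sentence text
# is made up:
#   parts = split_sentence('today we launch a new AI anchor', 13)
#   total = sum(p['time_ratio'] for p in parts)   # == 1
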
def parse_script(file_path, gt_list):
    with open(file_path, 'r', encoding="utf-8") as f:
        raw_lines = [line.strip() for line in f]
    lines = adjustSub_by_text_similarity(gt_list, raw_lines)
    text_parser = parser()
    #make dict
    dict_list = []
    for idx in range(len(lines)):
        script = {}
        rep_ls = text_parser.replace_list(lines[idx])
        line_content = lines[idx]
        for reptxt in rep_ls:
            line_content = line_content.replace(reptxt, '')
        if len(rep_ls) != 0:
            script['image_idx'] = int(rep_ls[0].replace('{', '').replace('}', ''))
        script['content'] = line_content
        time_raw = raw_lines[idx * 4 + 1].split(' --> ')
        start = time_raw[0].split(':')
        stop = time_raw[1].split(':')
        script['start'] = float(start[0])*3600 + float(start[1])*60 + float(start[2].replace(',', '.'))
        script['stop'] = float(stop[0])*3600 + float(stop[1])*60 + float(stop[2].replace(',', '.'))
        dict_list.append(script)
    #merge duplicated sentences
    skip_list = []
    script_not_dup_list = []
    for idx in range(len(dict_list)):
        if idx not in skip_list:
            dup_list = []
            found = 0
            for idx_inner in range(len(dict_list)):
                if dict_list[idx_inner]['content'] == dict_list[idx]['content'] and idx <= idx_inner:
                    dup_list.append(idx_inner)
                    skip_list.append(idx_inner)
                    found += 1
                if found != 0 and dict_list[idx_inner]['content'] != dict_list[idx]['content'] and idx <= idx_inner:
                    found = 0
                    break
            for dup_idx in dup_list:
                if dup_idx == min(dup_list):
                    dict_list[dup_idx]['type'] = 'lead_sentence'
                else:
                    dict_list[dup_idx]['type'] = 'duplicated'
            dict_list[dup_list[0]]['stop'] = dict_list[dup_list[-1]]['stop']
            if dict_list[idx]['type'] == 'lead_sentence':
                script_not_dup_list.append(dict_list[idx])
    new_idx = 0
    splitted_dict = []
    for dic in script_not_dup_list:
        dic_idx = 0
        accumulated_duration = 0
        duration = dic['stop'] - dic['start']
        for sub_dic in split_sentence(dic['content'], 13):
            new_dic = {}
            new_dic['index'] = new_idx
            if 'image_idx' in dic:
                new_dic['image_obj'] = {'start': dic['start'], 'idx': dic['image_idx']}
            new_idx += 1
            ind_duration = duration * sub_dic['time_ratio']
            new_dic['start'] = dic['start'] + accumulated_duration
            accumulated_duration += ind_duration
            new_dic['content'] = sub_dic['content']
            new_dic['duration'] = ind_duration*0.7
            splitted_dict.append(new_dic)
    return splitted_dict

def adjustSub_by_text_similarity(gts_in, gens_raw):
    #call by value only
    gts = gts_in[:]
    text_parser = parser()
    for i in range(len(gts)):
        rep_ls = text_parser.replace_list(gts[i])
        for reptxt in rep_ls:
            gts[i] = gts[i].replace(reptxt, '')
    gens = []
    for idx in range(int((len(gens_raw)+1)/4)):
        gens.append(gens_raw[idx*4+2])
    combine2 = [''.join([i, j]) for i, j in zip(gts, gts[1:])]
    combine3 = [''.join([i, j, k]) for i, j, k in zip(gts, gts[1:], gts[2:])]
    alls = gts  #+ combine2 + combine3
    adjusted = [None]*len(gens)
    duplicated_list = []
    for idx in range(len(gens)):
        match_text = difflib.get_close_matches(gens[idx], alls, cutoff=0.1)
        if len(match_text) != 0:
            if match_text[0] not in duplicated_list:
                adjusted[idx] = match_text[0]
                duplicated_list.append(match_text[0])
            else:
                if match_text[0] == adjusted[idx-1]:
                    adjusted[idx] = match_text[0]
                else:
                    found = 0
                    for mt in match_text:
                        if mt not in duplicated_list:
                            adjusted[idx] = mt
                            found += 1
                            break
                    if found == 0:
                        adjusted[idx] = ' '
        else:
            adjusted[idx] = ' '
    combine2_tag = [''.join([i, j]) for i, j in zip(gts_in, gts_in[1:])]
    combine3_tag = [''.join([i, j, k]) for i, j, k in zip(gts_in, gts_in[1:], gts_in[2:])]
    alls_tag = gts_in  #+ combine2_tag + combine3_tag
    for idx in range(len(adjusted)):
        match_text = difflib.get_close_matches(adjusted[idx], alls_tag, cutoff=0.1)
        adjusted[idx] = match_text[0]
    return adjusted

def trim_punctuation(s):
    pat_block = u'[^\u4e00-\u9fff0-9a-zA-Z]+'
    pattern = u'([0-9]+{0}[0-9]+)|{0}'.format(pat_block)
    res = re.sub(pattern, lambda x: x.group(1) if x.group(1) else u" ", s)
    return res

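# Example (illustrative only): trim_punctuation('hello, world!') keeps the CJK and
# alphanumeric characters and collapses each punctuation run into a single space,
# returning 'hello world '.
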
def splitter(s):
    for sent in re.findall(u'[^!?,。\!\?]+[!? 。\!\?]?', s, flags=re.U):
        yield sent

def split_by_pun(s):
    res = list(splitter(s))
    return res

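# Example (illustrative only): split_by_pun('今天天氣很好。我們出門吧!') returns
# ['今天天氣很好。', '我們出門吧!'] -- each clause keeps its trailing punctuation mark.
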
def generate_subtitle_image_from_dict(name_hash, sub_dict):
    for script in sub_dict:
        sv_path = dir_subtitle + name_hash + '/' + str(script['index'])+'.png'
        sub = script['content']
        txt2image(sub, sv_path)

def generate_subtitle_image(name_hash, text_content):
    img_list = [None]*len(text_content)
    for idx in range(len(text_content)):
        img_list[idx] = []
        senList = split_by_pun(text_content[idx])
        for inner_idx in range(len(senList)):
            sv_path = dir_subtitle + name_hash + '/' + str(idx) + str(inner_idx) + '.png'
            sub = senList[inner_idx]
            txt2image(sub, sv_path)
            clean_content = trim_punctuation(sub)
            re.findall(r'[\u4e00-\u9fff]+', clean_content)
            zh_idx = []
            eng_idx = []
            for i in range(len(clean_content)):
                if clean_content[i] > u'\u4e00' and clean_content[i] < u'\u9fff':
                    zh_idx.append(i)
                else:
                    eng_idx.append(i)
            space_index = [m.start() for m in re.finditer(' ', clean_content)]
            for s_idx in space_index:
                eng_idx.remove(s_idx)
            eng_range_list = []
            for k, g in groupby(enumerate(eng_idx), lambda ix: ix[0] - ix[1]):
                eng_range = list(map(itemgetter(1), g))
                eng_range_list.append(eng_range)
            total_syllable = 0
            for i in range(len(eng_range_list)):
                total_syllable += (syllable_count(clean_content[eng_range_list[i][0]:eng_range_list[i][-1]+1]) + 0.5)
            for i in range(len(zh_idx)):
                total_syllable += 1
            img_list[idx] += [{"count": total_syllable, "path": sv_path}]
    return img_list

def generate_subtitle_image_ENG(name_hash, text_content):
    img_list = [None]*len(text_content)
    for idx in range(len(text_content)):
        sv_path = dir_subtitle + name_hash + '/' + str(idx) + '.png'
        sub = text_content[idx]
        txt2image(sub, sv_path, lang='eng')
        img_list[idx] = sv_path
    return img_list

def video_writer_init(path):
    w = openshot.FFmpegWriter(path)
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    return w

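# Hedged usage sketch (added for illustration): video_writer_init() centralizes the
# 1280x720, 30 fps H.264 + AAC writer settings used by the render functions below. A caller
# opens the writer, feeds it frames pulled from a Timeline, then closes it; the file name
# and duration below are made up.
#   w = video_writer_init(tmp_video_dir + 'example.mp4')
#   w.Open()
#   for n in range(int(t.info.fps) * int(total_seconds)):
#       w.WriteFrame(t.GetFrame(n))
#   w.Close()
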
def video_gen(name_hash, name, text_content, image_urls, multiLang, avatar):
    file_prepare_long(name, name_hash, text_content, image_urls, multiLang)
    for fname in range(len(text_content)):
        call_anchor(name_hash+"/"+str(fname), avatar)
    print('called............................................')
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 1, 320)
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
    LOGO_OP.Open()  # Open the reader
    head_duration = LOGO_OP.info.duration
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP, layer=4, position=0, end=head_duration
        , location_y=-0.03, scale_x=0.8, scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head, layer=2, position=0, end=LOGO_OP.info.duration, ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += head_duration
    bg_head.Close()
    LOGO_OP.Close()
    anchor = openshot.FFmpegReader(dir_anchor+name_hash+"/0.mp4")
    anchor.Open()
    #anchor_clip = video_photo_clip(vid=anchor,layer=4,scale_x=0.65,scale_y=0.65,
    #    location_x=0.35,location_y=0.25,position=main_timer, end=anchor.info.duration,ck=ck_anchor,audio=False)
    #t.AddClip(anchor_clip)
    speech = openshot.FFmpegReader(dir_sound+name_hash+"/0.mp3")
    speech.Open()
    speech_clip = openshot.Clip(speech)
    speech_clip.Position(main_timer)
    speech_clip.End(anchor.info.duration)
    t.AddClip(speech_clip)
    main_timer += anchor.info.duration
    anchor.Close()
    speech.Close()
    LOGO_ED = openshot.FFmpegReader(dir_video+"LOGO_ED.avi")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED, layer=4, position=main_timer, end=LOGO_ED.info.duration
        , location_x=0.005, location_y=-0.031, scale_x=0.8, scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    main_timer += LOGO_ED.info.duration
    LOGO_ED.Close()
    bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg.Open()
    bg_times = math.floor(main_timer/bg.info.duration)
    left_time = (main_timer) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx], layer=2, position=bg_timer, end=bg_list[idx].info.duration, ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left, layer=2, position=bg_timer, end=left_time, ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    title = openshot.QtImageReader(dir_title+name_hash+".png")
    title.Open()  # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4, location_x=-0.047, location_y=0.801, position=0, end=head_duration+main_timer)
    t.AddClip(title_clip)
    w = video_writer_init(tmp_video_dir+name_hash+"raw.mp4")
    w.Open()
    frames = int(t.info.fps)*int(main_timer)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    print(name+"RAW DONE : www.choozmo.com:8168/"+tmp_video_dir+name_hash+"raw.mp4")
    #start adding sub
    #add sub
    Ctr_Autosub.init()
    Ctr_Autosub.generate_subtitles(tmp_video_dir+name_hash+"raw.mp4", 'zh', listener_progress, output=tmp_video_dir+name_hash+"script.txt", concurrency=DEFAULT_CONCURRENCY, subtitle_file_format=DEFAULT_SUBTITLE_FORMAT)
    sub_dict = parse_script(tmp_video_dir+name_hash+"script.txt", split_by_pun(text_content[0]))
    for subd in sub_dict:
        print(subd)
    generate_subtitle_image_from_dict(name_hash, sub_dict)
    #sv_path = dir_subtitle + name_hash + '/' + str(script['index'])+'.png'
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    raw = openshot.FFmpegReader(tmp_video_dir+name_hash+"raw.mp4")
    raw.Open()
    raw_clip = video_photo_clip(vid=raw, layer=2, position=0, end=raw.info.duration)
    t.AddClip(raw_clip)
    sub_img_list = [None] * len(sub_dict)
    sub_clip_list = [None] * len(sub_dict)
    for sub_obj in sub_dict:
        idx = int(sub_obj['index'])
        sub_img_list[idx] = openshot.QtImageReader(dir_subtitle + name_hash + '/' + str(idx)+'.png')
        sub_img_list[idx].Open()
        #if sub_obj['duration']>3:
        #    print('warning')
        #print('start:',sub_obj['start'],', duration :', sub_obj['duration'],' content',sub_obj['content'],'idx:',sub_obj['index'])
        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6, location_x=0.069, location_y=0.89, position=sub_obj['start'], end=math.ceil(sub_obj['duration']))
        t.AddClip(sub_clip_list[idx])
        sub_img_list[idx].Close()
    tp = parser()
    img_dict_ls = tp.image_clip_info(sub_dict)
    img_clip_list = [None]*len(listdir(dir_photo+name_hash))
    img_list = [None]*len(img_clip_list)
    img_file_ls = listdir(dir_photo+name_hash)
    for img_idx in range(len(img_file_ls)):
        img_list[img_idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+img_file_ls[img_idx])
        img_list[img_idx].Open()
        img_clip_list[img_idx] = video_photo_clip(vid=img_list[img_idx], layer=3
            , scale_x=0.81, scale_y=0.68, location_y=-0.03, position=img_dict_ls[img_idx]['start'], end=img_dict_ls[img_idx]['duration'], audio=False)
        t.AddClip(img_clip_list[img_idx])
        img_list[img_idx].Close()
    anchor = openshot.FFmpegReader(dir_anchor+name_hash+"/0.mp4")
    anchor.Open()
    anchor_clip = video_photo_clip(vid=anchor, layer=4, scale_x=0.65, scale_y=0.65,
        location_x=0.35, location_y=0.25, position=head_duration, end=anchor.info.duration, ck=ck_anchor, audio=False)
    t.AddClip(anchor_clip)
    w = video_writer_init(tmp_video_dir+name_hash+".mp4")
    w.Open()
    frames = int(t.info.fps)*int(main_timer)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    os.remove(tmp_video_dir+name_hash+"raw.mp4")
    os.remove(tmp_video_dir+name_hash+"script.txt")
    print(name+"ALL DONE : www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")

def anchor_video_v2(name_hash, name, text_content, image_urls, multiLang, avatar, freeTrial):
    print(name)
    print(text_content)
    print(os.getcwd())
    print('sub image made')
    print(multiLang)
    file_prepare(name, name_hash, text_content, image_urls, multiLang)
    sub_list = generate_subtitle_image(name_hash, text_content)
    for fname in range(len(text_content)):
        call_anchor(name_hash+"/"+str(fname), avatar)
        print('step finish')
    print('called............................................')
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 1, 320)
    duration = 0
    #average layer level is 3
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
    LOGO_OP.Open()  # Open the reader
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP, layer=4, position=0, end=LOGO_OP.info.duration
        , location_y=-0.03, scale_x=0.8, scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head, layer=2, position=0, end=LOGO_OP.info.duration, ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += LOGO_OP.info.duration
    head_duration = LOGO_OP.info.duration
    bg_head.Close()
    LOGO_OP.Close()
    clip_duration = 0
    photo_clip_list = [None]*len(text_content)
    img_list = [None]*len(text_content)
    anchor_clip_list = [None] * len(text_content)
    anchor_list = [None] * len(text_content)
    audio_clip_list = [None] * len(text_content)
    audio_list = [None] * len(text_content)
    sub_clip_list = [None] * len(text_content)
    sub_img_list = [None] * len(text_content)
    idx = 0
    for p in listdir(dir_photo+name_hash):
        anchor_list[idx] = openshot.FFmpegReader(dir_anchor+name_hash+"/"+str(idx)+".mp4")
        clip_duration = anchor_list[idx].info.duration
        anchor_list[idx].Open()
        anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx], layer=4, scale_x=0.65, scale_y=0.65,
            location_x=0.35, location_y=0.25, position=main_timer, end=clip_duration, ck=ck_anchor, audio=False)
        print('avatar is ', avatar)
        t.AddClip(anchor_clip_list[idx])
        img_list[idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+p)
        img_list[idx].Open()
        photo_clip_list[idx] = video_photo_clip(vid=img_list[idx], layer=3
            , scale_x=0.8, scale_y=0.6825, location_y=-0.03, position=main_timer, end=clip_duration, audio=False)
        t.AddClip(photo_clip_list[idx])
        img_list[idx].Close()
        audio_list[idx] = openshot.FFmpegReader(dir_sound+name_hash+"/"+str(idx)+".mp3")
        audio_list[idx].Open()
        audio_clip_list[idx] = openshot.Clip(audio_list[idx])
        audio_clip_list[idx].Position(main_timer)
        audio_clip_list[idx].End(clip_duration)
        t.AddClip(audio_clip_list[idx])
        img_list[idx].Close()
        anchor_list[idx].Close()
        audio_list[idx].Close()
        sub_img_list[idx] = [None] * len(sub_list[idx])
        sub_clip_list[idx] = [None] * len(sub_list[idx])
        sub_timer = 0
        for sub_idx in range(len(sub_list[idx])):
            sub_img_list[idx][sub_idx] = openshot.QtImageReader(sub_list[idx][sub_idx]['path'])
            sub_img_list[idx][sub_idx].Open()
            sub_duration = 0.205*sub_list[idx][sub_idx]['count']
            sub_clip_list[idx][sub_idx] = video_photo_clip(vid=sub_img_list[idx][sub_idx], layer=6, location_x=0.069, location_y=0.89, position=main_timer+sub_timer, end=sub_duration)
            t.AddClip(sub_clip_list[idx][sub_idx])
            sub_img_list[idx][sub_idx].Close()
            sub_timer += sub_duration
            print(sub_list[idx][sub_idx]['path'])
        main_timer += clip_duration
        idx += 1
    LOGO_ED = openshot.FFmpegReader(dir_video+"LOGO_ED.avi")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED, layer=4, position=main_timer, end=LOGO_ED.info.duration+2
        , location_x=0.005, location_y=-0.031
        , scale_x=0.8, scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    ED_duration = LOGO_ED.info.duration
    LOGO_ED.Close()
    bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg.Open()
    #number of full background loops needed to cover the whole timeline
    bg_times = math.floor((main_timer+ED_duration)/bg.info.duration)
    left_time = (main_timer+ED_duration) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx], layer=2, position=bg_timer
            , end=bg_list[idx].info.duration, ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left, layer=2, position=bg_timer, end=left_time, ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    title = openshot.QtImageReader(dir_title+name_hash+".png")
    title.Open()  # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4, location_x=-0.047, location_y=0.801, position=0, end=head_duration+main_timer)
    t.AddClip(title_clip)
    if freeTrial == 1:
        print("THIS IS TRIAL")
        wm = openshot.QtImageReader(dir_video+"freeTrialWatermark.png")
        wm.Open()
        wm_clip = video_photo_clip(wm, layer=6, position=0, end=int(head_duration+main_timer+ED_duration))
        t.AddClip(wm_clip)
    else:
        print("THIS IS NOT TRIAL")
        print(freeTrial)
    ####start building
    w = openshot.FFmpegWriter(tmp_video_dir+name_hash+".mp4")
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    w.Open()
    #may change duration into t.info.duration
    frames = int(t.info.fps)*int(head_duration+main_timer+ED_duration)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    #notify_group(name+"的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
    t.Close()
    w.Close()
    print("video at : www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")

def anchor_video_eng(name_hash, name, text_content, image_urls, sub_titles, avatar, freeTrial):
    file_prepare(name, name_hash, text_content, image_urls, 1, 'eng')
    sub_list = generate_subtitle_image_ENG(name_hash, sub_titles)
    for fname in range(len(text_content)):
        call_anchor(name_hash+"/"+str(fname), avatar)
        print('step finish')
    print('called............................................')
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 1, 320)
    duration = 0
    #average layer level is 3
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    #add logo
    LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
    LOGO_OP.Open()  # Open the reader
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP, layer=4, position=0, end=LOGO_OP.info.duration
        , location_y=-0.03, scale_x=0.8, scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    #add background video (head is different)
    bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head, layer=2, position=0, end=LOGO_OP.info.duration, ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += LOGO_OP.info.duration
    head_duration = LOGO_OP.info.duration
    bg_head.Close()
    LOGO_OP.Close()
    #prepare empty list
    clip_duration = 0
    photo_clip_list = [None]*len(text_content)
    img_list = [None]*len(text_content)
    anchor_clip_list = [None] * len(text_content)
    anchor_list = [None] * len(text_content)
    audio_clip_list = [None] * len(text_content)
    audio_list = [None] * len(text_content)
    sub_clip_list = [None] * len(text_content)
    #openshot image holder
    sub_img_list = [None] * len(text_content)
    idx = 0
    for p in listdir(dir_photo+name_hash):
        anchor_list[idx] = openshot.FFmpegReader(dir_anchor+name_hash+"/"+str(idx)+".mp4")
        clip_duration = anchor_list[idx].info.duration
        anchor_list[idx].Open()
        anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx], layer=4, scale_x=0.65, scale_y=0.65,
            location_x=0.35, location_y=0.25, position=main_timer, end=clip_duration, ck=ck_anchor, audio=False)
        t.AddClip(anchor_clip_list[idx])
        #insert image
        img_list[idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+p)
        img_list[idx].Open()
        photo_clip_list[idx] = video_photo_clip(vid=img_list[idx], layer=3
            , scale_x=0.81, scale_y=0.68, location_y=-0.03, position=main_timer, end=clip_duration, audio=False)
        t.AddClip(photo_clip_list[idx])
        img_list[idx].Close()
        #insert audio (speech)
        audio_list[idx] = openshot.FFmpegReader(dir_sound+name_hash+"/"+str(idx)+".mp3")
        audio_list[idx].Open()
        audio_clip_list[idx] = openshot.Clip(audio_list[idx])
        audio_clip_list[idx].Position(main_timer)
        audio_clip_list[idx].End(clip_duration)
        t.AddClip(audio_clip_list[idx])
        #insert subtitle
        sub_img_list[idx] = openshot.QtImageReader(sub_list[idx])
        sub_img_list[idx].Open()
        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6, location_x=0.069, location_y=0.89, position=main_timer, end=clip_duration)
        t.AddClip(sub_clip_list[idx])
        img_list[idx].Close()
        anchor_list[idx].Close()
        audio_list[idx].Close()
        sub_img_list[idx].Close()
        main_timer += clip_duration
        idx += 1
    LOGO_ED = openshot.FFmpegReader(dir_video+"ED_ENG.mp4")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED, layer=4, position=main_timer, end=LOGO_ED.info.duration+2
        , location_x=0.005, location_y=-0.031
        , scale_x=0.8, scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    ED_duration = LOGO_ED.info.duration
    LOGO_ED.Close()
    bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg.Open()
    #number of full background loops needed to cover the whole timeline
    bg_times = math.floor((main_timer+ED_duration)/bg.info.duration)
    left_time = (main_timer+ED_duration) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx], layer=2, position=bg_timer
            , end=bg_list[idx].info.duration, ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left, layer=2, position=bg_timer, end=left_time, ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    title = openshot.QtImageReader(dir_title+name_hash+".png")
    title.Open()  # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4, location_x=-0.047, location_y=0.801, position=0, end=head_duration+main_timer)
    t.AddClip(title_clip)
    if freeTrial == 1:
        wm = openshot.QtImageReader(dir_video+"freeTrialWatermark.png")
        wm.Open()
        wm_clip = video_photo_clip(wm, layer=6, position=0, end=int(head_duration+main_timer+ED_duration))
        t.AddClip(wm_clip)
        print("THIS IS TRIAL")
    else:
        print("THIS IS NOT TRIAL")
        print(freeTrial)
    ####start building
    w = openshot.FFmpegWriter(tmp_video_dir+name_hash+".mp4")
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    w.Open()
    #may change duration into t.info.duration
    frames = int(t.info.fps)*int(head_duration+main_timer+ED_duration)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    #notify_group(name+"(ENG)的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
    t.Close()
    w.Close()
    print("video at : www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")

#line notifs
import pyttsx3

def make_speech(text):
    engine = pyttsx3.init()
    #voices = engine.getProperty('voices')
    engine.setProperty('voice', 'Mandarin')
    engine.save_to_file(text, '/app/speech.mp3')
    engine.runAndWait()

class video_service(rpyc.Service):
    def exposed_call_video(self, name_hash, name, text_content, image_urls, multiLang, avatar, freeTrial):
        print('ML:'+str(multiLang))
        anchor_video_v2(name_hash, name, text_content, image_urls, multiLang, avatar, freeTrial)

    def exposed_call_video_eng(self, name_hash, name, text_content, image_urls, sub_titles, avatar, freeTrial):
        anchor_video_eng(name_hash, name, text_content, image_urls, sub_titles, avatar, freeTrial)

    def exposed_call_video_gen(self, name_hash, name, text_content, image_urls, multiLang, avatar):
        print('ML:'+str(multiLang))  # this is the long video version
        video_gen(name_hash, name, text_content, image_urls, multiLang, avatar)

    def exposed_make_speech(self, text):
        make_speech(text)

from rpyc.utils.server import ThreadedServer
t = ThreadedServer(video_service, port=8858)
print('service started')
t.start()
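
# Hedged client-side sketch (not part of this module): another process can drive the
# service above through rpyc roughly as below; the host name and call arguments are made up
# for illustration, and rpyc exposes each exposed_* method without its prefix.
#   import rpyc
#   conn = rpyc.connect('SERVER_HOST', 8858)
#   conn.root.call_video('abc123hash', 'demo title', ['first paragraph'],
#                        ['https://example.com/1.jpg'], 0, 7, 0)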