# openshot_video_generator.py
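# Overview: this module prepares news material for an AI-anchor video. It downloads
# the article's images/clips, writes per-sentence text files, synthesizes speech
# (gTTS / zhtts / pyttsx3), asks a remote host over rpyc to render the virtual anchor,
# composites everything (logo, chroma-keyed background, photos, anchor, subtitles)
# into a 1280x720 video with libopenshot, and finally exposes the whole pipeline as
# an rpyc ThreadedServer on port 8858.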

from os import listdir
from os.path import isfile, isdir, join
import openshot
import threading
import zhtts
import os
import urllib.request  # get_url_type() below uses urllib.request explicitly
from typing import List
import requests
from pydantic import BaseModel
from bs4 import BeautifulSoup
from PIL import Image, ImageDraw, ImageFont
import pyttsx3
import rpyc
import random
import re
import time
import math
import dataset
from datetime import datetime
from gtts import gTTS
import ffmpy
from difflib import SequenceMatcher
import difflib
from autosub import DEFAULT_CONCURRENCY
from autosub import DEFAULT_SUBTITLE_FORMAT
from pytranscriber.control.ctr_main import Ctr_Main
from pytranscriber.control.ctr_autosub import Ctr_Autosub
import multiprocessing
from itertools import groupby
from operator import itemgetter
from util.parser import parser
dir_sound = 'mp3_track/'
dir_photo = 'photo/'
dir_text = 'text_file/'
dir_video = 'video_material/'
dir_title = 'title/'
dir_subtitle = 'subtitle/'
dir_anchor = 'anchor_raw/'
tmp_video_dir = 'tmp_video/'
video_sub_folder = 'ai_anchor_video/'
dir_list = [dir_sound, dir_photo, dir_text, dir_video, dir_title, dir_subtitle, dir_anchor, tmp_video_dir]
def notify_group(msg):
    glist = ['7vilzohcyQMPLfAMRloUawiTV4vtusZhxv8Czo7AJX8', 'WekCRfnAirSiSxALiD6gcm0B56EejsoK89zFbIaiZQD',
             '1dbtJHbWVbrooXmQqc4r8OyRWDryjD4TMJ6DiDsdgsX', 'HOB1kVNgIb81tTB4Ort1BfhVp9GFo6NlToMQg88vEhh']
    for gid in glist:
        headers = {
            "Authorization": "Bearer " + gid,
            "Content-Type": "application/x-www-form-urlencoded"
        }
        params = {"message": msg}
        r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params)
def cKey(r, g, b, fuzz):
    col = openshot.Color()
    col.red = openshot.Keyframe(r)
    col.green = openshot.Keyframe(g)
    col.blue = openshot.Keyframe(b)
    return openshot.ChromaKey(col, openshot.Keyframe(fuzz))
def video_photo_clip(vid=None, layer=None, position=None, end=None,
                     scale_x=1, scale_y=1, location_x=0, location_y=0, ck=None, audio=True):
    clip = openshot.Clip(vid)
    clip.Layer(layer)
    clip.Position(position)
    clip.End(end)
    clip.scale_x = openshot.Keyframe(scale_x)
    clip.scale_y = openshot.Keyframe(scale_y)
    clip.location_x = openshot.Keyframe(location_x)
    clip.location_y = openshot.Keyframe(location_y)
    if ck is not None:
        clip.AddEffect(ck)
    if audio:
        clip.has_audio = openshot.Keyframe(1)
    else:
        clip.has_audio = openshot.Keyframe(0)
    return clip
def listener_progress(string, percent):
    pass  # progress callback required by autosub; intentionally a no-op
def myunichchar(unicode_char):
    mb_string = unicode_char.encode('big5')
    try:
        unicode_char = unichr(ord(mb_string[0]) << 8 | ord(mb_string[1]))
    except NameError:  # Python 3 has no unichr(); indexing bytes already yields ints
        unicode_char = chr(mb_string[0] << 8 | mb_string[1])
    return unicode_char
def get_url_type(url):
    print('---------------------------------------------')
    req = urllib.request.Request(url, method='HEAD', headers={'User-Agent': 'Mozilla/5.0'})
    r = urllib.request.urlopen(req)
    contentType = r.getheader('Content-Type')
    print(contentType)
    print('-------------------------------------------------')
    return contentType
def make_dir(name_hash):
    for direct in dir_list:
        if not os.path.isdir(direct):
            os.mkdir(direct)
    try:
        os.mkdir(dir_photo + name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_photo + name_hash, " already exists")
    try:
        os.mkdir(dir_text + name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_text + name_hash, " already exists")
    try:
        os.mkdir(dir_sound + name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_sound + name_hash, " already exists")
    try:
        os.mkdir(dir_anchor + name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_anchor + name_hash, " already exists")
    try:
        os.mkdir(dir_subtitle + name_hash)
    except FileExistsError:
        print("~~~~~~Warning~~~~~~~~~Directory ", dir_subtitle + name_hash, " already exists")
def file_prepare(name, name_hash, text_content, image_urls, multiLang, lang='zh'):
    make_dir(name_hash)
    img_num = 1
    for imgu in image_urls:
        if get_url_type(imgu) == 'video/mp4':
            r = requests.get(imgu)
            f = open(dir_photo + name_hash + "/" + str(img_num) + ".mp4", 'wb')
            for chunk in r.iter_content(chunk_size=255):
                if chunk:
                    f.write(chunk)
            f.close()
        else:
            im = Image.open(requests.get(imgu, stream=True).raw)
            im = im.convert("RGB")
            im.save(dir_photo + name_hash + "/" + str(img_num) + ".jpg")
        img_num += 1
    # save text
    txt_idx = 0
    for txt in text_content:
        text_file = open(dir_text + name_hash + "/" + str(txt_idx) + ".txt", "w")
        text_file.write(txt)
        text_file.close()
        txt_idx += 1
    print("text file made")
    # make mp3
    txt_idx = 0
    for txt in text_content:
        if lang != 'zh' or multiLang == 1:
            if lang != 'zh':
                tts = gTTS(txt)
                tts.save(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
            else:
                tts = gTTS(txt, lang='zh-tw')
                tts.save(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
            # speed up
            ff = ffmpy.FFmpeg(inputs={dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3": None},
                              outputs={dir_sound + name_hash + "/" + str(txt_idx) + ".mp3": ["-filter:a", "atempo=1.2"]})
            ff.run()
            os.remove(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
        else:
            print('use zhtts')
            tts = zhtts.TTS()
            tts.text2wav(txt, dir_sound + name_hash + "/" + str(txt_idx) + ".mp3")
        txt_idx += 1
    print("mp3 file made")
    # make title as image
    txt2image_title(name, dir_title + name_hash + ".png", lang)
def file_prepare_long(name, name_hash, text_content, image_urls, multiLang, lang='zh'):
    make_dir(name_hash)
    img_num = 1
    for imgu in image_urls:
        if get_url_type(imgu) == 'video/mp4':
            r = requests.get(imgu)
            f = open(dir_photo + name_hash + "/" + str(img_num) + ".mp4", 'wb')
            for chunk in r.iter_content(chunk_size=255):
                if chunk:
                    f.write(chunk)
            f.close()
        else:
            im = Image.open(requests.get(imgu, stream=True).raw)
            im = im.convert("RGB")
            im.save(dir_photo + name_hash + "/" + str(img_num) + ".jpg")
        img_num += 1
    # make mp3
    text_parser = parser()
    txt_idx = 0
    for txt in text_content:
        rep_list = text_parser.replace_list(txt)
        for reptxt in rep_list:
            txt = txt.replace(reptxt, '')
        if lang != 'zh' or multiLang == 1:
            if lang != 'zh':
                tts = gTTS(txt)
                tts.save(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
            else:
                tts = gTTS(txt, lang='zh-tw')
                tts.save(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
            # speed up
            ff = ffmpy.FFmpeg(inputs={dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3": None},
                              outputs={dir_sound + name_hash + "/" + str(txt_idx) + ".mp3": ["-filter:a", "atempo=1.2"]})
            ff.run()
            os.remove(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
        else:
            print('use zhtts')
            tts = zhtts.TTS()
            tts.text2wav(txt, dir_sound + name_hash + "/" + str(txt_idx) + ".mp3")
        txt_idx += 1
    print("mp3 file made")
    # make title as image
    txt2image_title(name, dir_title + name_hash + ".png", lang)
def txt2image(content, save_target, lang='zh'):
    unicode_text = trim_punctuation(content)
    font = ''
    if lang == 'zh':
        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=38)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=38)
    text_width, text_height = font.getsize(unicode_text)
    canvas = Image.new('RGBA', (700, 500), (255, 0, 0, 0))
    draw = ImageDraw.Draw(canvas)
    text = unicode_text
    draw.text((5, 5), text, (255, 255, 0), font)
    canvas.save(save_target, "PNG")
def txt2image_title(content, save_target, lang='zh'):
    unicode_text = trim_punctuation(content)
    font = ''
    if lang == 'zh':
        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=22)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=22)
    text_width, text_height = font.getsize(unicode_text)
    canvas = Image.new('RGBA', (510, 500), (255, 0, 0, 0))
    draw = ImageDraw.Draw(canvas)
    text = unicode_text
    draw.text((5, 5), text, (17, 41, 167), font)
    canvas.save(save_target, "PNG")
def call_anchor(fileName, avatar):
    conn = rpyc.classic.connect("192.168.1.105", 18812)
    ros = conn.modules.os
    rsys = conn.modules.sys
    fr = open(dir_sound + fileName + ".mp3", 'rb')  # voice
    # warning!!! the file may be replaced by another process
    fw = conn.builtins.open('/tmp/output.mp3', 'wb')
    while True:
        b = fr.read(1024)
        if b:
            fw.write(b)
        else:
            break
    fr.close()
    fw.close()
    val = random.randint(1000000, 9999999)
    ros.chdir('/home/jared/to_video')
    ros.system('./p' + str(avatar) + '.sh ' + str(val) + ' &')
    while True:
        print('waiting...')
        if ros.path.exists('/tmp/results/' + str(val)):
            break
        time.sleep(5)
        print('waiting...')
    fr = conn.builtins.open('/tmp/results/' + str(val) + '.mp4', 'rb')
    fw = open(dir_anchor + fileName + ".mp4", 'wb')
    while True:
        b = fr.read(1024)
        if b:
            fw.write(b)
        else:
            break
    fr.close()
    fw.close()
def syllable_count(word):
    word = word.lower()
    count = 0
    vowels = "aeiouy"
    if word[0] in vowels:
        count += 1
    for index in range(1, len(word)):
        if word[index] in vowels and word[index - 1] not in vowels:
            count += 1
    if word.endswith("e"):
        count -= 1
    if count == 0:
        count += 1
    return count
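# Illustrative behaviour (not exhaustive): the heuristic counts vowel groups,
# subtracts a trailing silent "e", and never returns less than 1, e.g.
#   syllable_count("anchor")  # -> 2
#   syllable_count("the")     # -> 1 (one vowel group, minus final "e", floored to 1)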
def split_sentence(in_str, maxLen):
    re.findall(r'[\u4e00-\u9fff]+', in_str)
    zh_idx = []
    eng_idx = []
    for i in range(len(in_str)):
        if in_str[i] > u'\u4e00' and in_str[i] < u'\u9fff':
            zh_idx.append(i)
        else:
            eng_idx.append(i)
    space_index = [m.start() for m in re.finditer(' ', in_str)]
    for idx in space_index:
        eng_idx.remove(idx)
    eng_range_list = []
    for k, g in groupby(enumerate(eng_idx), lambda ix: ix[0] - ix[1]):
        eng_range = list(map(itemgetter(1), g))
        eng_range_list.append(eng_range)
    total_syllable = 0
    for i in range(len(eng_range_list)):
        total_syllable += (syllable_count(in_str[eng_range_list[i][0]:eng_range_list[i][-1] + 1]) + 0.5)
    for i in range(len(zh_idx)):
        total_syllable += 1
    # final chchchchchc[en][en][en]
    # [en] is a vocabulary dict with occurence of image
    zh_eng_idx_list = []
    i = 0
    while i < len(in_str):
        if in_str[i] == ' ':
            i += 1
        if i in zh_idx:
            zh_eng_idx_list.append(i)
            i += 1
        if i in eng_idx:
            for ls in eng_range_list:
                if i in ls:
                    zh_eng_idx_list.append(ls)
                    i = ls[-1] + 1
                    break
    zh_eng_dict_list = [{'content': '', 'time_ratio': 0}]
    idx = 0
    current_len = 0
    sen_idx = 0
    while idx < len(zh_eng_idx_list):
        str_from_idx = ''
        sylla_cnt = 1
        if type(zh_eng_idx_list[idx]) == type([]):
            str_from_idx = in_str[zh_eng_idx_list[idx][0]:zh_eng_idx_list[idx][-1] + 1] + ' '
            sylla_cnt = syllable_count(str_from_idx)
        else:
            str_from_idx = in_str[zh_eng_idx_list[idx]]
        if len(zh_eng_dict_list[sen_idx]['content']) + sylla_cnt >= maxLen:
            zh_eng_dict_list[sen_idx]['time_ratio'] = current_len / total_syllable
            zh_eng_dict_list.append({'content': '', 'time_ratio': 0})
            sen_idx += 1
            current_len = 0
        else:
            current_len += sylla_cnt
        zh_eng_dict_list[sen_idx]['content'] += str_from_idx
        idx += 1
    total_ratio = 0
    for obj in zh_eng_dict_list:
        total_ratio += obj['time_ratio']
    zh_eng_dict_list[-1]['time_ratio'] = 1 - total_ratio
    return zh_eng_dict_list
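# Usage sketch (illustrative; exact chunking depends on the input): split_sentence
# cuts a mixed Chinese/English string into chunks of at most roughly `maxLen` units
# and assigns each chunk its estimated share of the speaking time, e.g.
#   for chunk in split_sentence(some_line, 13):
#       print(chunk['content'], chunk['time_ratio'])  # the ratios sum to 1.0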
def parse_script(file_path, gt_list):
    with open(file_path, 'r', encoding="utf-8") as f:
        raw_lines = [line.strip() for line in f]
    lines = adjustSub_by_text_similarity(gt_list, raw_lines)
    text_parser = parser()
    # make dict
    dict_list = []
    for idx in range(len(lines)):
        script = {}
        rep_ls = text_parser.replace_list(lines[idx])
        line_content = lines[idx]
        for reptxt in rep_ls:
            line_content = line_content.replace(reptxt, '')
        if len(rep_ls) != 0:
            script['image_idx'] = int(rep_ls[0].replace('{', '').replace('}', ''))
        script['content'] = line_content
        time_raw = raw_lines[idx * 4 + 1].split(' --> ')
        start = time_raw[0].split(':')
        stop = time_raw[1].split(':')
        script['start'] = float(start[0])*3600 + float(start[1])*60 + float(start[2].replace(',', '.'))
        script['stop'] = float(stop[0])*3600 + float(stop[1])*60 + float(stop[2].replace(',', '.'))
        dict_list.append(script)
    # merge duplicated sentences
    skip_list = []
    script_not_dup_list = []
    for idx in range(len(dict_list)):
        if idx not in skip_list:
            dup_list = []
            found = 0
            for idx_inner in range(len(dict_list)):
                if dict_list[idx_inner]['content'] == dict_list[idx]['content'] and idx <= idx_inner:
                    dup_list.append(idx_inner)
                    skip_list.append(idx_inner)
                    found += 1
                if found != 0 and dict_list[idx_inner]['content'] != dict_list[idx]['content'] and idx <= idx_inner:
                    found = 0
                    break
            for dup_idx in dup_list:
                if dup_idx == min(dup_list):
                    dict_list[dup_idx]['type'] = 'lead_sentence'
                else:
                    dict_list[dup_idx]['type'] = 'duplicated'
            dict_list[dup_list[0]]['stop'] = dict_list[dup_list[-1]]['stop']
            if dict_list[idx]['type'] == 'lead_sentence':
                script_not_dup_list.append(dict_list[idx])
    new_idx = 0
    splitted_dict = []
    for dic in script_not_dup_list:
        dic_idx = 0
        accumulated_duration = 0
        duration = dic['stop'] - dic['start']
        for sub_dic in split_sentence(dic['content'], 13):
            new_dic = {}
            new_dic['index'] = new_idx
            if 'image_idx' in dic:
                new_dic['image_obj'] = {'start': dic['start'], 'idx': dic['image_idx']}
            new_idx += 1
            ind_duration = duration * sub_dic['time_ratio']
            new_dic['start'] = dic['start'] + accumulated_duration
            accumulated_duration += ind_duration
            new_dic['content'] = sub_dic['content']
            new_dic['duration'] = ind_duration * 0.7
            splitted_dict.append(new_dic)
    return splitted_dict
def adjustSub_by_text_similarity(gts_in, gens_raw):
    # call by value only
    gts = gts_in[:]
    text_parser = parser()
    for i in range(len(gts)):
        rep_ls = text_parser.replace_list(gts[i])
        for reptxt in rep_ls:
            gts[i] = gts[i].replace(reptxt, '')
    gens = []
    for idx in range(int((len(gens_raw) + 1) / 4)):
        gens.append(gens_raw[idx * 4 + 2])
    combine2 = [''.join([i, j]) for i, j in zip(gts, gts[1:])]
    combine3 = [''.join([i, j, k]) for i, j, k in zip(gts, gts[1:], gts[2:])]
    alls = gts + combine2 + combine3
    adjusted = [None] * len(gens)
    duplicated_list = []
    for idx in range(len(gens)):
        match_text = difflib.get_close_matches(gens[idx], alls, cutoff=0.1)
        if len(match_text) != 0:
            if match_text[0] not in duplicated_list:
                adjusted[idx] = match_text[0]
                duplicated_list.append(match_text[0])
            else:
                if match_text[0] == adjusted[idx - 1]:
                    adjusted[idx] = match_text[0]
                else:
                    found = 0
                    for mt in match_text:
                        if mt not in duplicated_list:
                            adjusted[idx] = mt
                            found += 1
                            break
                    if found == 0:
                        adjusted[idx] = ' '
        else:
            adjusted[idx] = ' '
    combine2_tag = [''.join([i, j]) for i, j in zip(gts_in, gts_in[1:])]
    combine3_tag = [''.join([i, j, k]) for i, j, k in zip(gts_in, gts_in[1:], gts_in[2:])]
    alls_tag = gts_in + combine2_tag + combine3_tag
    for idx in range(len(adjusted)):
        match_text = difflib.get_close_matches(adjusted[idx], alls_tag, cutoff=0.1)
        adjusted[idx] = match_text[0]
    return adjusted
def trim_punctuation(s):
    pat_block = u'[^\u4e00-\u9fff0-9a-zA-Z]+'
    pattern = u'([0-9]+{0}[0-9]+)|{0}'.format(pat_block)
    res = re.sub(pattern, lambda x: x.group(1) if x.group(1) else u" ", s)
    return res
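# Behaviour sketch: trim_punctuation keeps CJK characters, digits and ASCII letters,
# collapses any other run of characters into a single space, and preserves separators
# that sit between two numbers (the captured group), so a token such as "10:30"
# survives intact while ordinary punctuation becomes plain spaces.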
def splitter(s):
    for sent in re.findall(u'[^!?,。\!\?]+[!? 。\!\?]?', s, flags=re.U):
        yield sent
def split_by_pun(s):
    res = list(splitter(s))
    return res
def generate_subtitle_image_from_dict(name_hash, sub_dict):
    for script in sub_dict:
        sv_path = dir_subtitle + name_hash + '/' + str(script['index']) + '.png'
        sub = script['content']
        txt2image(sub, sv_path)
def generate_subtitle_image(name_hash, text_content):
    img_list = [None] * len(text_content)
    for idx in range(len(text_content)):
        img_list[idx] = []
        senList = split_by_pun(text_content[idx])
        for inner_idx in range(len(senList)):
            sv_path = dir_subtitle + name_hash + '/' + str(idx) + str(inner_idx) + '.png'
            sub = senList[inner_idx]
            txt2image(sub, sv_path)
            img_list[idx] += [{"count": len(sub), "path": sv_path}]
    return img_list
def generate_subtitle_image_ENG(name_hash, text_content):
    img_list = [None] * len(text_content)
    for idx in range(len(text_content)):
        sv_path = dir_subtitle + name_hash + '/' + str(idx) + '.png'
        sub = text_content[idx]
        txt2image(sub, sv_path, lang='eng')
        img_list[idx] = sv_path
    return img_list
def video_writer_init(path):
    w = openshot.FFmpegWriter(path)
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    return w
def video_gen(name_hash, name, text_content, image_urls, multiLang, avatar):
    file_prepare_long(name, name_hash, text_content, image_urls, multiLang)
    for fname in range(len(text_content)):
        call_anchor(name_hash + "/" + str(fname), avatar)
    print('called............................................')
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 1, 320)
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    LOGO_OP = openshot.FFmpegReader(dir_video + "LOGO_OP_4.mp4")
    LOGO_OP.Open()  # Open the reader
    head_duration = LOGO_OP.info.duration
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP, layer=4, position=0, end=head_duration,
                                    location_y=-0.03, scale_x=0.8, scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    bg_head = openshot.FFmpegReader(dir_video + "complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head, layer=2, position=0, end=LOGO_OP.info.duration, ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += head_duration
    bg_head.Close()
    LOGO_OP.Close()
    anchor = openshot.FFmpegReader(dir_anchor + name_hash + "/0.mp4")
    anchor.Open()
    #anchor_clip = video_photo_clip(vid=anchor,layer=4,scale_x=0.65,scale_y=0.65,
    #    location_x=0.35,location_y=0.25,position=main_timer, end=anchor.info.duration,ck=ck_anchor,audio=False)
    #t.AddClip(anchor_clip)
    speech = openshot.FFmpegReader(dir_sound + name_hash + "/0.mp3")
    speech.Open()
    speech_clip = openshot.Clip(speech)
    speech_clip.Position(main_timer)
    speech_clip.End(anchor.info.duration)
    t.AddClip(speech_clip)
    main_timer += anchor.info.duration
    anchor.Close()
    speech.Close()
    LOGO_ED = openshot.FFmpegReader(dir_video + "LOGO_ED.avi")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED, layer=4, position=main_timer, end=LOGO_ED.info.duration,
                                    location_x=0.005, location_y=-0.031, scale_x=0.8, scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    main_timer += LOGO_ED.info.duration
    LOGO_ED.Close()
    bg = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg.Open()
    bg_times = math.floor(main_timer / bg.info.duration)
    left_time = (main_timer) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx], layer=2, position=bg_timer,
                                             end=bg_list[idx].info.duration, ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left, layer=2, position=bg_timer, end=left_time, ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    title = openshot.QtImageReader(dir_title + name_hash + ".png")
    title.Open()  # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4, location_x=-0.047, location_y=0.801, position=0, end=head_duration + main_timer)
    t.AddClip(title_clip)
    w = video_writer_init(tmp_video_dir + name_hash + "raw.mp4")
    w.Open()
    frames = int(t.info.fps) * int(main_timer)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    print(name + "RAW DONE : www.choozmo.com:8168/" + tmp_video_dir + name_hash + "raw.mp4")
    # start adding sub
    # add sub
    Ctr_Autosub.init()
    Ctr_Autosub.generate_subtitles(tmp_video_dir + name_hash + "raw.mp4", 'zh', listener_progress,
                                   output=tmp_video_dir + name_hash + "script.txt",
                                   concurrency=DEFAULT_CONCURRENCY,
                                   subtitle_file_format=DEFAULT_SUBTITLE_FORMAT)
    sub_dict = parse_script(tmp_video_dir + name_hash + "script.txt", split_by_pun(text_content[0]))
    for subd in sub_dict:
        print(subd)
    generate_subtitle_image_from_dict(name_hash, sub_dict)
    #sv_path = dir_subtitle + name_hash + '/' + str(script['index'])+'.png'
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    raw = openshot.FFmpegReader(tmp_video_dir + name_hash + "raw.mp4")
    raw.Open()
    raw_clip = video_photo_clip(vid=raw, layer=2, position=0, end=raw.info.duration)
    t.AddClip(raw_clip)
    sub_img_list = [None] * len(sub_dict)
    sub_clip_list = [None] * len(sub_dict)
    for sub_obj in sub_dict:
        idx = int(sub_obj['index'])
        sub_img_list[idx] = openshot.QtImageReader(dir_subtitle + name_hash + '/' + str(idx) + '.png')
        sub_img_list[idx].Open()
        #if sub_obj['duration']>3:
        #    print('warning')
        #print('start:',sub_obj['start'],', duration :', sub_obj['duration'],' content',sub_obj['content'],'idx:',sub_obj['index'])
        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6, location_x=0.069, location_y=0.89,
                                              position=sub_obj['start'], end=math.ceil(sub_obj['duration']))
        t.AddClip(sub_clip_list[idx])
        sub_img_list[idx].Close()
    tp = parser()
    img_dict_ls = tp.image_clip_info(sub_dict)
    img_clip_list = [None] * len(listdir(dir_photo + name_hash))
    img_list = [None] * len(img_clip_list)
    img_file_ls = listdir(dir_photo + name_hash)
    for img_idx in range(len(img_file_ls)):
        img_list[img_idx] = openshot.FFmpegReader(dir_photo + name_hash + '/' + img_file_ls[img_idx])
        img_list[img_idx].Open()
        img_clip_list[img_idx] = video_photo_clip(vid=img_list[img_idx], layer=3,
                                                  scale_x=0.81, scale_y=0.68, location_y=-0.03,
                                                  position=img_dict_ls[img_idx]['start'],
                                                  end=img_dict_ls[img_idx]['duration'], audio=False)
        t.AddClip(img_clip_list[img_idx])
        img_list[img_idx].Close()
    anchor = openshot.FFmpegReader(dir_anchor + name_hash + "/0.mp4")
    anchor.Open()
    anchor_clip = video_photo_clip(vid=anchor, layer=4, scale_x=0.65, scale_y=0.65,
                                   location_x=0.35, location_y=0.25, position=head_duration,
                                   end=anchor.info.duration, ck=ck_anchor, audio=False)
    t.AddClip(anchor_clip)
    w = video_writer_init(tmp_video_dir + name_hash + ".mp4")
    w.Open()
    frames = int(t.info.fps) * int(main_timer)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    os.remove(tmp_video_dir + name_hash + "raw.mp4")
    os.remove(tmp_video_dir + name_hash + "script.txt")
    print(name + "ALL DONE : www.choozmo.com:8168/" + video_sub_folder + name_hash + "raw.mp4")
def anchor_video_v2(name_hash, name, text_content, image_urls, multiLang, avatar):
    print(os.getcwd())
    print('sub image made')
    print(multiLang)
    file_prepare(name, name_hash, text_content, image_urls, multiLang)
    sub_list = generate_subtitle_image(name_hash, text_content)
    for fname in range(len(text_content)):
        call_anchor(name_hash + "/" + str(fname), avatar)
        print('step finish')
    print('called............................................')
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 1, 320)
    duration = 0
    # average layer level is 3
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    LOGO_OP = openshot.FFmpegReader(dir_video + "LOGO_OP_4.mp4")
    LOGO_OP.Open()  # Open the reader
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP, layer=4, position=0, end=LOGO_OP.info.duration,
                                    location_y=-0.03, scale_x=0.8, scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    bg_head = openshot.FFmpegReader(dir_video + "complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head, layer=2, position=0, end=LOGO_OP.info.duration, ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += LOGO_OP.info.duration
    head_duration = LOGO_OP.info.duration
    bg_head.Close()
    LOGO_OP.Close()
    clip_duration = 0
    photo_clip_list = [None] * len(text_content)
    img_list = [None] * len(text_content)
    anchor_clip_list = [None] * len(text_content)
    anchor_list = [None] * len(text_content)
    audio_clip_list = [None] * len(text_content)
    audio_list = [None] * len(text_content)
    sub_clip_list = [None] * len(text_content)
    sub_img_list = [None] * len(text_content)
    idx = 0
    for p in listdir(dir_photo + name_hash):
        anchor_list[idx] = openshot.FFmpegReader(dir_anchor + name_hash + "/" + str(idx) + ".mp4")
        clip_duration = anchor_list[idx].info.duration
        anchor_list[idx].Open()
        anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx], layer=4, scale_x=0.65, scale_y=0.65,
                                                 location_x=0.35, location_y=0.25, position=main_timer,
                                                 end=clip_duration, ck=ck_anchor, audio=False)
        t.AddClip(anchor_clip_list[idx])
        img_list[idx] = openshot.FFmpegReader(dir_photo + name_hash + '/' + p)
        img_list[idx].Open()
        photo_clip_list[idx] = video_photo_clip(vid=img_list[idx], layer=3,
                                                scale_x=0.81, scale_y=0.68, location_y=-0.03,
                                                position=main_timer, end=clip_duration, audio=False)
        t.AddClip(photo_clip_list[idx])
        img_list[idx].Close()
        audio_list[idx] = openshot.FFmpegReader(dir_sound + name_hash + "/" + str(idx) + ".mp3")
        audio_list[idx].Open()
        audio_clip_list[idx] = openshot.Clip(audio_list[idx])
        audio_clip_list[idx].Position(main_timer)
        audio_clip_list[idx].End(clip_duration)
        t.AddClip(audio_clip_list[idx])
        img_list[idx].Close()
        anchor_list[idx].Close()
        audio_list[idx].Close()
        sub_img_list[idx] = [None] * len(sub_list[idx])
        sub_clip_list[idx] = [None] * len(sub_list[idx])
        sub_timer = 0
        for sub_idx in range(len(sub_list[idx])):
            sub_img_list[idx][sub_idx] = openshot.QtImageReader(sub_list[idx][sub_idx]['path'])
            sub_img_list[idx][sub_idx].Open()
            sub_duration = 0.205 * sub_list[idx][sub_idx]['count']
            sub_clip_list[idx][sub_idx] = video_photo_clip(vid=sub_img_list[idx][sub_idx], layer=6,
                                                           location_x=0.069, location_y=0.89,
                                                           position=main_timer + sub_timer, end=sub_duration)
            t.AddClip(sub_clip_list[idx][sub_idx])
            sub_img_list[idx][sub_idx].Close()
            sub_timer += sub_duration
            print(sub_list[idx][sub_idx]['path'])
        main_timer += clip_duration
        idx += 1
    LOGO_ED = openshot.FFmpegReader(dir_video + "LOGO_ED.avi")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED, layer=4, position=main_timer, end=LOGO_ED.info.duration + 2,
                                    location_x=0.005, location_y=-0.031,
                                    scale_x=0.8, scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    ED_duration = LOGO_ED.info.duration
    LOGO_ED.Close()
    bg = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg.Open()
    bg_times = math.floor((main_timer + ED_duration) / bg.info.duration)  # full background loops over the whole runtime, matching the modulo below
    left_time = (main_timer + ED_duration) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx], layer=2, position=bg_timer,
                                             end=bg_list[idx].info.duration, ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left, layer=2, position=bg_timer, end=left_time, ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    title = openshot.QtImageReader(dir_title + name_hash + ".png")
    title.Open()  # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4, location_x=-0.047, location_y=0.801, position=0, end=head_duration + main_timer)
    t.AddClip(title_clip)
    #### start building
    w = openshot.FFmpegWriter(tmp_video_dir + name_hash + ".mp4")
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    w.Open()
    # may change duration into t.info.duration
    frames = int(t.info.fps) * int(head_duration + main_timer + ED_duration)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    #notify_group(name+"的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
    t.Close()
    w.Close()
    print("video at : www.choozmo.com:8168/" + video_sub_folder + name_hash + ".mp4")
def anchor_video_eng(name_hash, name, text_content, image_urls, sub_titles, avatar):
    file_prepare(name, name_hash, text_content, image_urls, 'eng')
    sub_list = generate_subtitle_image_ENG(name_hash, sub_titles)
    for fname in range(len(text_content)):
        call_anchor(name_hash + "/" + str(fname), avatar)
        print('step finish')
    print('called............................................')
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 1, 320)
    duration = 0
    # average layer level is 3
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    # add logo
    LOGO_OP = openshot.FFmpegReader(dir_video + "LOGO_OP_4.mp4")
    LOGO_OP.Open()  # Open the reader
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP, layer=4, position=0, end=LOGO_OP.info.duration,
                                    location_y=-0.03, scale_x=0.8, scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    # add background video (head is different)
    bg_head = openshot.FFmpegReader(dir_video + "complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head, layer=2, position=0, end=LOGO_OP.info.duration, ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += LOGO_OP.info.duration
    head_duration = LOGO_OP.info.duration
    bg_head.Close()
    LOGO_OP.Close()
    # prepare empty list
    clip_duration = 0
    photo_clip_list = [None] * len(text_content)
    img_list = [None] * len(text_content)
    anchor_clip_list = [None] * len(text_content)
    anchor_list = [None] * len(text_content)
    audio_clip_list = [None] * len(text_content)
    audio_list = [None] * len(text_content)
    sub_clip_list = [None] * len(text_content)
    # openshot image holder
    sub_img_list = [None] * len(text_content)
    idx = 0
    for p in listdir(dir_photo + name_hash):
        anchor_list[idx] = openshot.FFmpegReader(dir_anchor + name_hash + "/" + str(idx) + ".mp4")
        clip_duration = anchor_list[idx].info.duration
        anchor_list[idx].Open()
        anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx], layer=4, scale_x=0.65, scale_y=0.65,
                                                 location_x=0.35, location_y=0.25, position=main_timer,
                                                 end=clip_duration, ck=ck_anchor, audio=False)
        t.AddClip(anchor_clip_list[idx])
        # insert image
        img_list[idx] = openshot.FFmpegReader(dir_photo + name_hash + '/' + p)
        img_list[idx].Open()
        photo_clip_list[idx] = video_photo_clip(vid=img_list[idx], layer=3,
                                                scale_x=0.81, scale_y=0.68, location_y=-0.03,
                                                position=main_timer, end=clip_duration, audio=False)
        t.AddClip(photo_clip_list[idx])
        img_list[idx].Close()
        # insert audio (speech)
        audio_list[idx] = openshot.FFmpegReader(dir_sound + name_hash + "/" + str(idx) + ".mp3")
        audio_list[idx].Open()
        audio_clip_list[idx] = openshot.Clip(audio_list[idx])
        audio_clip_list[idx].Position(main_timer)
        audio_clip_list[idx].End(clip_duration)
        t.AddClip(audio_clip_list[idx])
        # insert subtitle
        sub_img_list[idx] = openshot.QtImageReader(sub_list[idx])
        sub_img_list[idx].Open()
        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6, location_x=0.069, location_y=0.89,
                                              position=main_timer, end=clip_duration)
        t.AddClip(sub_clip_list[idx])
        img_list[idx].Close()
        anchor_list[idx].Close()
        audio_list[idx].Close()
        sub_img_list[idx].Close()
        main_timer += clip_duration
        idx += 1
    LOGO_ED = openshot.FFmpegReader(dir_video + "ED_ENG.mp4")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED, layer=4, position=main_timer, end=LOGO_ED.info.duration + 2,
                                    location_x=0.005, location_y=-0.031,
                                    scale_x=0.8, scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    ED_duration = LOGO_ED.info.duration
    LOGO_ED.Close()
    bg = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg.Open()
    bg_times = math.floor((main_timer + ED_duration) / bg.info.duration)  # full background loops over the whole runtime, matching the modulo below
    left_time = (main_timer + ED_duration) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx], layer=2, position=bg_timer,
                                             end=bg_list[idx].info.duration, ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video + "complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left, layer=2, position=bg_timer, end=left_time, ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    title = openshot.QtImageReader(dir_title + name_hash + ".png")
    title.Open()  # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4, location_x=-0.047, location_y=0.801, position=0, end=head_duration + main_timer)
    t.AddClip(title_clip)
    #### start building
    w = openshot.FFmpegWriter(tmp_video_dir + name_hash + ".mp4")
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    w.Open()
    # may change duration into t.info.duration
    frames = int(t.info.fps) * int(head_duration + main_timer + ED_duration)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    #notify_group(name+"(ENG)的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
    t.Close()
    w.Close()
    print("video at : www.choozmo.com:8168/" + video_sub_folder + name_hash + ".mp4")
# line notifs
import pyttsx3
def make_speech(text):
    engine = pyttsx3.init()
    #voices = engine.getProperty('voices')
    engine.setProperty('voice', 'Mandarin')
    engine.save_to_file(text, '/app/speech.mp3')
    engine.runAndWait()
class video_service(rpyc.Service):
    def exposed_call_video(self, name_hash, name, text_content, image_urls, multiLang, avatar):
        print('ML:' + str(multiLang))
        anchor_video_v2(name_hash, name, text_content, image_urls, multiLang, avatar)
    def exposed_call_video_eng(self, name_hash, name, text_content, image_urls, sub_titles, avatar):
        anchor_video_eng(name_hash, name, text_content, image_urls, sub_titles, avatar)
    def exposed_call_video_gen(self, name_hash, name, text_content, image_urls, multiLang, avatar):
        print('ML:' + str(multiLang))  # this is the long video version
        video_gen(name_hash, name, text_content, image_urls, multiLang, avatar)
    def exposed_make_speech(self, text):
        make_speech(text)
from rpyc.utils.server import ThreadedServer
t = ThreadedServer(video_service, port=8858)
print('service started')
t.start()
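# Client-side usage sketch (assumes the server above is reachable on this host/port;
# rpyc exposes `exposed_*` methods without the prefix on `conn.root`):
#
#   import rpyc
#   conn = rpyc.connect("127.0.0.1", 8858)
#   conn.root.call_video(name_hash, name, text_content, image_urls, multiLang, avatar)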