| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005 | 
from datetime import datetime
from difflib import SequenceMatcher
from itertools import groupby
from operator import itemgetter
from os import listdir
from os.path import isfile, isdir, join
from typing import List
import difflib
import math
import multiprocessing
import os
import random
import re
import threading
import time
import urllib
import urllib.request

from autosub import DEFAULT_CONCURRENCY
from autosub import DEFAULT_SUBTITLE_FORMAT
from bs4 import BeautifulSoup
from gtts import gTTS
from PIL import Image, ImageDraw, ImageFont
from pydantic import BaseModel
from pytranscriber.control.ctr_main import Ctr_Main
from pytranscriber.control.ctr_autosub import Ctr_Autosub
import dataset
import ffmpy
import openshot
import pyttsx3
import requests
import rpyc
import zhtts

from util.parser import parser
 
# Working directories for pipeline artifacts.  All paths are relative to the
# process working directory; make_dir() creates any that are missing.
dir_sound = 'mp3_track/'            # per-sentence synthesized speech (mp3)
dir_photo = 'photo/'                # downloaded images / mp4 clips, one sub-folder per job
dir_text = 'text_file/'            # raw per-sentence text dumps
dir_video = 'video_material/'       # static intro/outro/background footage
dir_title = 'title/'               # rendered title-card PNGs
dir_subtitle = 'subtitle/'          # rendered subtitle PNGs, one sub-folder per job
dir_anchor = 'anchor_raw/'          # AI-anchor clips fetched back from the render host
tmp_video_dir = 'tmp_video/'        # intermediate and final video output
video_sub_folder = 'ai_anchor_video/'   # publish folder referenced in the final URL message
# Directories auto-created by make_dir().  video_sub_folder is not listed here;
# presumably it is provisioned by whatever serves the finished videos -- verify.
dir_list = [dir_sound,dir_photo,dir_text,dir_video,dir_title,dir_subtitle,dir_anchor,tmp_video_dir]
 
def notify_group(msg):
    """Broadcast *msg* to every LINE Notify group token, best effort.

    A failure (timeout, network error, bad token) for one group is reported
    and skipped so the remaining groups are still notified.
    """
    # NOTE(review): access tokens are hard-coded in source; they should be
    # moved to configuration / environment variables.
    glist = ['7vilzohcyQMPLfAMRloUawiTV4vtusZhxv8Czo7AJX8', 'WekCRfnAirSiSxALiD6gcm0B56EejsoK89zFbIaiZQD', '1dbtJHbWVbrooXmQqc4r8OyRWDryjD4TMJ6DiDsdgsX', 'HOB1kVNgIb81tTB4Ort1BfhVp9GFo6NlToMQg88vEhh']
    for gid in glist:
        headers = {
            "Authorization": "Bearer " + gid,
            "Content-Type": "application/x-www-form-urlencoded",
        }
        params = {"message": msg}
        try:
            # timeout added so a hung LINE endpoint cannot stall the pipeline
            requests.post("https://notify-api.line.me/api/notify",
                          headers=headers, params=params, timeout=10)
        except requests.RequestException as exc:
            # best-effort notification: log and continue with the other groups
            print('notify_group failed for one token:', exc)
 
def cKey(r, g, b, fuzz):
    """Build an openshot ChromaKey effect keyed to the RGB colour (r, g, b).

    *fuzz* is the colour-distance tolerance passed to the effect.
    """
    key_color = openshot.Color()
    key_color.red = openshot.Keyframe(r)
    key_color.green = openshot.Keyframe(g)
    key_color.blue = openshot.Keyframe(b)
    return openshot.ChromaKey(key_color, openshot.Keyframe(fuzz))
 
def video_photo_clip(vid=None, layer=None, position=None, end=None
    , scale_x=1, scale_y=1, location_x=0, location_y=0, ck=None, audio=True):
    """Wrap an openshot reader in a Clip with placement keyframes.

    Parameters
    ----------
    vid : an opened openshot reader (FFmpegReader / QtImageReader)
    layer : timeline layer number (higher draws on top)
    position : start time on the timeline, seconds
    end : clip end time, seconds
    scale_x, scale_y : constant scale keyframes
    location_x, location_y : constant position keyframes (normalized)
    ck : optional ChromaKey effect to attach
    audio : whether the clip's audio track is enabled

    Returns the configured openshot.Clip.
    """
    clip = openshot.Clip(vid)
    clip.Layer(layer)
    clip.Position(position)
    clip.End(end)
    clip.scale_x = openshot.Keyframe(scale_x)
    clip.scale_y = openshot.Keyframe(scale_y)
    clip.location_x = openshot.Keyframe(location_x)
    clip.location_y = openshot.Keyframe(location_y)

    if ck is not None:  # was `ck != None`; identity test is the Python idiom
        clip.AddEffect(ck)
    # was an if/else on `audio == True`; a single keyframe expression suffices
    clip.has_audio = openshot.Keyframe(1 if audio else 0)
    return clip
 
def listener_progress(string, percent):
    """No-op progress callback handed to Ctr_Autosub.generate_subtitles.

    The original body was the bare expression ``True``, which evaluates and
    discards a constant — i.e. it did nothing.  The no-op is made explicit;
    the callback still returns None, as before.
    """
    return None
 
def myunichchar(unicode_char):
    """Return the character whose code point is the Big5 byte pair of *unicode_char*.

    E.g. '中' encodes to Big5 b'\\xa4\\xa4', giving chr(0xa4a4).

    Bug fixed: the original kept a Python-2 ``unichr``/``ord`` path guarded by
    ``except NameError``, but on Python 3 ``ord(mb_string[0])`` raises
    TypeError (indexing bytes yields an int) *before* ``unichr`` can raise
    NameError, so the function always crashed.  Single-byte encodings (plain
    ASCII) also raised IndexError; they are now returned unchanged.
    """
    mb_string = unicode_char.encode('big5')
    if len(mb_string) < 2:
        # ASCII characters encode to one byte in Big5; no pair to combine.
        return unicode_char
    return chr(mb_string[0] << 8 | mb_string[1])
 
def get_url_type(url):
    """Return the Content-Type header reported by a HEAD request to *url*.

    Fixes: the file only did ``import urllib``, which does not guarantee the
    ``urllib.request`` submodule is loaded (a ``deps`` import was added); the
    HTTP response was also never closed — ``with`` releases the connection.
    """
    req = urllib.request.Request(url, method='HEAD', headers={'User-Agent': 'Mozilla/5.0'})
    with urllib.request.urlopen(req) as r:
        return r.getheader('Content-Type')
 
-     
 
def make_dir(name_hash):
    """Create the shared artifact directories and the per-job sub-folders.

    Shared directories come from the module-level ``dir_list``; the per-job
    folders are keyed by *name_hash*.  Re-running for an existing job only
    prints a warning (same message as before), it does not fail.
    """
    for direct in dir_list:
        if not os.path.isdir(direct):
            os.mkdir(direct)
    # The original repeated this try/except five times verbatim; a loop over
    # the five parents produces identical behavior and output.
    for parent in (dir_photo, dir_text, dir_sound, dir_anchor, dir_subtitle):
        try:
            os.mkdir(parent + name_hash)
        except FileExistsError:
            print("~~~~~~Warning~~~~~~~~~Directory ", parent + name_hash, " already exists")
 
def file_prepare(name, name_hash, text_content, image_urls, multiLang, lang='zh'):
    """Prepare all raw assets for one job.

    Downloads each URL in *image_urls* (mp4 or image) into the job's photo
    folder, dumps each sentence of *text_content* to a .txt file, synthesizes
    per-sentence speech, and renders the title card.

    Fixes over the original: downloads and text files are written through
    ``with`` (the handles used to leak on error) and text is pinned to UTF-8
    (the platform default breaks on Windows with Chinese content).
    """
    make_dir(name_hash)
    # --- download media ------------------------------------------------
    img_num = 1
    for imgu in image_urls:
        if get_url_type(imgu) == 'video/mp4':
            r = requests.get(imgu)
            with open(dir_photo + name_hash + "/" + str(img_num) + ".mp4", 'wb') as f:
                for chunk in r.iter_content(chunk_size=255):
                    if chunk:
                        f.write(chunk)
        else:
            im = Image.open(requests.get(imgu, stream=True).raw)
            im = im.convert("RGB")
            im.save(dir_photo + name_hash + "/" + str(img_num) + ".jpg")
        img_num += 1
    # --- save text -----------------------------------------------------
    for txt_idx, txt in enumerate(text_content):
        with open(dir_text + name_hash + "/" + str(txt_idx) + ".txt", "w", encoding="utf-8") as text_file:
            text_file.write(txt)
    print("text file made")
    # --- make mp3 ------------------------------------------------------
    for txt_idx, txt in enumerate(text_content):
        if lang != 'zh' or multiLang == 1:
            # gTTS path (non-Chinese, or mixed-language content)
            if lang != 'zh':
                tts = gTTS(txt)
            else:
                tts = gTTS(txt, lang='zh-tw')
            tts.save(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
            # speed up 1.2x: raw gTTS narration is too slow for news delivery
            ff = ffmpy.FFmpeg(
                inputs={dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3": None},
                outputs={dir_sound + name_hash + "/" + str(txt_idx) + ".mp3": ["-filter:a", "atempo=1.2"]})
            ff.run()
            os.remove(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
        else:
            print('use zhtts')
            tts = zhtts.TTS()
            # NOTE: writes WAV data to a .mp3 path, as the original did
            tts.text2wav(txt, dir_sound + name_hash + "/" + str(txt_idx) + ".mp3")
    print("mp3 file made")
    # --- title card ----------------------------------------------------
    txt2image_title(name, dir_title + name_hash + ".png", lang)
 
def file_prepare_long(name, name_hash, text_content, image_urls, multiLang, lang='zh'):
    """Prepare assets for the long-article flow (video_gen).

    Same as file_prepare but: no per-sentence .txt dumps, and parser-
    recognized ``{image}`` tags are stripped from the text before speech
    synthesis.  Downloads are now written through ``with`` — the original
    leaked the file handle on error.
    """
    make_dir(name_hash)
    # --- download media ------------------------------------------------
    img_num = 1
    for imgu in image_urls:
        if get_url_type(imgu) == 'video/mp4':
            r = requests.get(imgu)
            with open(dir_photo + name_hash + "/" + str(img_num) + ".mp4", 'wb') as f:
                for chunk in r.iter_content(chunk_size=255):
                    if chunk:
                        f.write(chunk)
        else:
            im = Image.open(requests.get(imgu, stream=True).raw)
            im = im.convert("RGB")
            im.save(dir_photo + name_hash + "/" + str(img_num) + ".jpg")
        img_num += 1
    # --- make mp3 ------------------------------------------------------
    text_parser = parser()
    for txt_idx, txt in enumerate(text_content):
        # strip {image}-style tags so they are not read aloud
        for reptxt in text_parser.replace_list(txt):
            txt = txt.replace(reptxt, '')
        if lang != 'zh' or multiLang == 1:
            if lang != 'zh':
                tts = gTTS(txt)
            else:
                tts = gTTS(txt, lang='zh-tw')
            tts.save(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
            # speed up 1.2x: raw gTTS narration is too slow for news delivery
            ff = ffmpy.FFmpeg(
                inputs={dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3": None},
                outputs={dir_sound + name_hash + "/" + str(txt_idx) + ".mp3": ["-filter:a", "atempo=1.2"]})
            ff.run()
            os.remove(dir_sound + name_hash + "/" + str(txt_idx) + "raw.mp3")
        else:
            print('use zhtts')
            tts = zhtts.TTS()
            # NOTE: writes WAV data to a .mp3 path, as the original did
            tts.text2wav(txt, dir_sound + name_hash + "/" + str(txt_idx) + ".mp3")
    print("mp3 file made")
    # --- title card ----------------------------------------------------
    txt2image_title(name, dir_title + name_hash + ".png", lang)
 
def txt2image(content, save_target, lang='zh'):
    """Render *content* (punctuation trimmed) as a transparent 700x500 PNG.

    Yellow text at (5, 5); font chosen by *lang*.  The original also called
    ``font.getsize`` into two unused locals — dead code, and that API was
    removed in Pillow 10, so the call is dropped.
    """
    unicode_text = trim_punctuation(content)
    if lang == 'zh':
        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=38)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=38)
    canvas = Image.new('RGBA', (700, 500), (255, 0, 0, 0))
    draw = ImageDraw.Draw(canvas)
    draw.text((5, 5), unicode_text, (255, 255, 0), font)
    canvas.save(save_target, "PNG")
 
def txt2image_title(content, save_target, lang='zh'):
    """Render a title card: *content* as blue text on a transparent 510x500 PNG.

    Same fix as txt2image: the unused ``font.getsize`` call (removed in
    Pillow 10) is dropped.
    """
    unicode_text = trim_punctuation(content)
    if lang == 'zh':
        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=22)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=22)
    canvas = Image.new('RGBA', (510, 500), (255, 0, 0, 0))
    draw = ImageDraw.Draw(canvas)
    draw.text((5, 5), unicode_text, (17, 41, 167), font)
    canvas.save(save_target, "PNG")
 
def call_anchor(fileName, avatar):
    """Render an AI-anchor clip for one voice track on the remote render host.

    Copies mp3_track/<fileName>.mp3 to the render machine over rpyc, launches
    the avatar render script there, polls until it finishes, then copies the
    resulting mp4 back to anchor_raw/<fileName>.mp4.

    fileName: "<name_hash>/<sentence index>" (no extension).
    avatar: integer selecting the remote ./p<avatar>.sh render script.
    """
    conn = rpyc.classic.connect("192.168.1.105", 18812)
    ros = conn.modules.os
    rsys = conn.modules.sys
    fr = open(dir_sound+fileName+".mp3", 'rb')  # local voice track
    # WARNING: /tmp/output.mp3 is a fixed remote path — concurrent jobs would
    # overwrite each other's upload (known race, noted in the original).
    fw = conn.builtins.open('/tmp/output.mp3', 'wb')
    # stream the mp3 to the remote host in 1 KiB chunks
    while True:
        b = fr.read(1024)
        if b:
            fw.write(b)
        else:
            break
    fr.close()
    fw.close()
    # random job id distinguishes this render's result directory
    val = random.randint(1000000, 9999999)
    ros.chdir('/home/jared/to_video')
    # fire-and-forget: the render script runs in the background on the host
    ros.system('./p'+str(avatar)+'.sh '+str(val)+' &')
    # poll every 5 s until the job's marker appears
    # NOTE(review): the existence check is on /tmp/results/<val> but the file
    # read below is /tmp/results/<val>.mp4 — presumably the bare path is a
    # completion marker written by the script; confirm on the render host.
    while True:
        print('waiting...')
        if ros.path.exists('/tmp/results/'+str(val)):
            break
        time.sleep(5)
        print('waiting...')
    # copy the rendered clip back, again in 1 KiB chunks
    fr = conn.builtins.open('/tmp/results/'+str(val)+'.mp4', 'rb')
    fw = open(dir_anchor+fileName+".mp4", 'wb')
    while True:
        b = fr.read(1024)
        if b:
            fw.write(b)
        else:
            break
    fr.close()
    fw.close()
 
-     
 
def syllable_count(word):
    """Approximate the number of English syllables in *word*.

    Heuristic: count groups of consecutive vowels (aeiouy), subtract one for
    a trailing 'e', and floor the result at 1 for any non-empty word.

    Fix: the original raised IndexError on an empty string (``word[0]``);
    an empty string now returns 0.
    """
    word = word.lower()
    if not word:
        return 0
    count = 0
    vowels = "aeiouy"
    if word[0] in vowels:
        count += 1
    for index in range(1, len(word)):
        # a vowel starts a new syllable only after a non-vowel
        if word[index] in vowels and word[index - 1] not in vowels:
            count += 1
    if word.endswith("e"):
        count -= 1
    if count == 0:
        count = 1
    return count
 
def split_sentence(in_str, maxLen):
    """Split mixed Chinese/English text into subtitle-sized chunks.

    Returns a list of dicts ``{'content': str, 'time_ratio': float}`` where
    time_ratio is each chunk's share of the sentence's speaking time
    (syllable-weighted; the ratios are forced to sum to 1 at the end).
    Chinese characters count as 1 syllable; each English word counts as its
    syllable_count plus 0.5 (inter-word pause).
    """
    # NOTE(review): result of this findall is discarded — dead statement.
    re.findall(r'[\u4e00-\u9fff]+', in_str)
    # Classify every character position: CJK vs everything else.
    zh_idx = []
    eng_idx= []
    for i in range(len(in_str)):
        if in_str[i] > u'\u4e00' and in_str[i] < u'\u9fff':
            zh_idx.append(i)
        else:
            eng_idx.append(i)
    # Spaces are separators, not part of any English run.
    space_index = [m.start() for m in re.finditer(' ', in_str)]
    for idx in space_index:
        eng_idx.remove(idx)

    # Group consecutive non-CJK indices into runs (one run ~ one English word);
    # the (index - value) key is the standard consecutive-run groupby trick.
    eng_range_list = []
    for k, g in groupby(enumerate(eng_idx), lambda ix : ix[0] - ix[1]):
        eng_range = list(map(itemgetter(1), g))
        eng_range_list.append(eng_range)
    # Total syllable weight of the sentence (denominator for time_ratio).
    total_syllable = 0
    for i in range(len(eng_range_list)):
        total_syllable += (syllable_count(in_str[eng_range_list[i][0]:eng_range_list[i][-1]+1])+0.5)
    for i in range(len(zh_idx)):
        total_syllable+=1

    #final chchchchchc[en][en][en]
    #[en] is a vocabulary dict with  occurence of image
    # Build a token stream: a CJK char contributes its index (int), an
    # English word contributes its whole index run (list).
    zh_eng_idx_list = []
    i = 0
    while i < len(in_str):
        if in_str[i]==' ':
            i+=1
        if i in zh_idx:
            zh_eng_idx_list.append(i)
            i+=1
        if i in eng_idx:
            for ls in eng_range_list:
                if i in ls:
                    zh_eng_idx_list.append(ls)
                    i = ls[-1]+1
                    break

    # Pack tokens into chunks of at most ~maxLen weight.
    zh_eng_dict_list = [{'content':'','time_ratio':0}]
    idx = 0 
    current_len = 0
    sen_idx = 0
    while idx < len(zh_eng_idx_list):
        str_from_idx = ''
        sylla_cnt = 1
        if type(zh_eng_idx_list[idx])==type([]):
            # English word token: slice the original text, keep a trailing space
            str_from_idx = in_str[zh_eng_idx_list[idx][0]:zh_eng_idx_list[idx][-1]+1]+' '
            sylla_cnt = syllable_count(str_from_idx)
        else:
            str_from_idx = in_str[zh_eng_idx_list[idx]]

        # NOTE: mixed metric — character length of the chunk so far plus the
        # *syllable* count of the next token is compared against maxLen.
        # NOTE(review): if a single token's sylla_cnt >= maxLen the chunk never
        # accepts it and idx does not advance — potential infinite loop for a
        # 13+-syllable word at maxLen=13; verify upstream input constraints.
        if len(zh_eng_dict_list[sen_idx]['content'])+sylla_cnt>=maxLen:
            # close the current chunk and start a new one (token is retried)
            zh_eng_dict_list[sen_idx]['time_ratio'] = current_len/total_syllable

            zh_eng_dict_list.append({'content':'','time_ratio':0})
            sen_idx+=1
            current_len = 0
        else:
            current_len += sylla_cnt
            zh_eng_dict_list[sen_idx]['content'] += str_from_idx
            idx+=1

    # The last chunk absorbs the remaining ratio so the total is exactly 1.
    total_ratio = 0
    for obj in zh_eng_dict_list:
        total_ratio+=obj['time_ratio']
    zh_eng_dict_list[-1]['time_ratio'] = 1-total_ratio
    return zh_eng_dict_list
 
-    
 
def parse_script(file_path, gt_list):
    """Turn an autosub subtitle file into timed, chunked subtitle entries.

    file_path: .srt-style output of Ctr_Autosub (4 lines per cue: index,
    "HH:MM:SS,mmm --> HH:MM:SS,mmm", text, blank).
    gt_list: the ground-truth sentences the script was generated from (may
    carry {image} tags understood by util.parser).

    Returns a list of dicts with keys: index, start, duration, content, and
    optionally image_obj ({'start', 'idx'}) for entries carrying image tags.
    """
    with open(file_path, 'r',encoding="utf-8") as f:
        raw_lines = [line.strip() for line in f]
    # Replace each recognized line with its closest ground-truth sentence
    # (restores correct wording and the {image} tags).
    lines = adjustSub_by_text_similarity(gt_list,raw_lines)
    text_parser = parser()
    #make dict
    dict_list = []
    for idx in range(len(lines)):
        script={}
        print(lines[idx])
        rep_ls = text_parser.replace_list(lines[idx])
        print(rep_ls)
        line_content = lines[idx]
        # strip the tags from the display text
        for reptxt in rep_ls:
            line_content = line_content.replace(reptxt,'')
        if len(rep_ls)!=0:
            # first tag names the image index, e.g. "{3}" -> 3
            script['image_idx'] = int(rep_ls[0].replace('{','').replace('}',''))
        script['content'] = line_content
        # cue timing lives on the second line of each 4-line srt record
        time_raw = raw_lines[idx * 4 +1 ].split(' --> ')
        start = time_raw[0].split(':')
        stop = time_raw[1].split(':')
        script['start'] = float(start[0])*3600 + float(start[1])*60 + float(start[2].replace(',','.'))
        script['stop'] = float(stop[0])*3600 + float(stop[1])*60 + float(stop[2].replace(',','.'))
        dict_list.append(script)
    #merge duplicated sentences
    # Identical content may span several consecutive cues; keep only the
    # first ('lead_sentence') and stretch its stop time over the duplicates.
    script_not_dup_list = []
    for idx in range(len(dict_list)):
        dup_list = []
        for idx_inner in range(len(dict_list)):
            if dict_list[idx_inner]['content']==dict_list[idx]['content']:
                dup_list.append(idx_inner)
        for dup_idx in dup_list:
            if dup_idx == min(dup_list):
                dict_list[dup_idx]['type'] = 'lead_sentence'
            else:
                dict_list[dup_idx]['type'] = 'duplicated'
        dict_list[dup_list[0]]['stop'] = dict_list[dup_list[-1]]['stop']
        if dict_list[idx]['type'] == 'lead_sentence':
            script_not_dup_list.append(dict_list[idx])

    #avoid subtitle overlapping ?   Timeline overlapping not found currently
    #cut by max length---->  eng seperated problem   {eng_idx}
    #ENG counts, zh counts, space counts
    # Split each merged sentence into display-sized chunks and distribute the
    # cue's duration across them by split_sentence's time_ratio.
    new_idx = 0
    splitted_dict = []
    for dic in script_not_dup_list:
        dic_idx = 0
        accumulated_duration = 0
        duration = dic['stop']-dic['start']
        for sub_dic in split_sentence(dic['content'],13):
            new_dic = {}
            new_dic['index'] = new_idx
            if 'image_idx' in dic:
                # image cue is anchored at the parent sentence's start time
                new_dic['image_obj'] = {'start':dic['start'],'idx':dic['image_idx']}
            new_idx+=1
            ind_duration = duration * sub_dic['time_ratio']
            new_dic['start'] = dic['start'] + accumulated_duration
            accumulated_duration += ind_duration
            new_dic['content'] = sub_dic['content']
            # display slightly shorter than the speech slot — presumably to
            # avoid overlap with the next subtitle; confirm the 0.7 factor
            new_dic['duration'] = ind_duration*0.7
            splitted_dict.append(new_dic)

    return splitted_dict
 
def adjustSub_by_text_similarity(gts_in, gens_raw):
    """Map each recognized subtitle line to its closest ground-truth sentence.

    gts_in: ground-truth sentences, possibly carrying {image} tags.
    gens_raw: raw srt lines; the text of cue i sits at index i*4 + 2.

    Matching runs against single sentences plus concatenations of 2 and 3
    consecutive sentences, because speech recognition may merge neighbors
    into one cue.  Returns one (tag-bearing) ground-truth string per cue.
    """
    #call by value only
    gts = gts_in[:]
    text_parser = parser()
    # strip tags so fuzzy matching compares plain text
    for i in range(len(gts)):
        rep_ls = text_parser.replace_list(gts[i])
        for reptxt in rep_ls:
            gts[i] = gts[i].replace(reptxt,'')
    print(gts)
    # pull the text line out of every 4-line srt record
    gens = []
    for idx in range(int((len(gens_raw)+1)/4)):
        gens.append(gens_raw[idx*4+2])

    # candidate pool: single sentences + 2- and 3-sentence concatenations
    combine2 = [''.join([i,j]) for i,j in zip(gts, gts[1:])]
    combine3 = [''.join([i,j,k]) for i,j,k in zip(gts, gts[1:], gts[2:])]
    alls = gts + combine2 + combine3
    adjusted = [None]*len(gens)
    duplicated_list = []
    for idx in range(len(gens)):
        # NOTE(review): assumes at least one match above cutoff=0.1 —
        # match_text[0] raises IndexError otherwise; verify inputs.
        match_text = difflib.get_close_matches(gens[idx], alls, cutoff=0.1)
        if match_text[0] in duplicated_list:
            # best match already taken: reuse it only if it was the previous
            # cue's match (a genuine repeat), else take the next unused one
            for mt in match_text:
                if mt == adjusted[idx-1] or mt not in duplicated_list:
                    adjusted[idx] = mt
                    break
        else:
            adjusted[idx] = match_text[0]
            duplicated_list.append(match_text[0])
    # Re-match against the tag-bearing originals to restore {image} tags.
    combine2_tag = [''.join([i,j]) for i,j in zip(gts_in, gts_in[1:])]
    combine3_tag = [''.join([i,j,k]) for i,j,k in zip(gts_in, gts_in[1:], gts_in[2:])]
    alls_tag = gts_in + combine2_tag + combine3_tag
    for idx in range(len(adjusted)):
        match_text = difflib.get_close_matches(adjusted[idx], alls_tag, cutoff=0.1)
        adjusted[idx] = match_text[0]
    return adjusted
 
def trim_punctuation(s):
    """Collapse punctuation runs in *s* to a single space.

    A punctuation run *between digits* (e.g. the '.' in '3.14') is kept
    intact so numbers survive.  CJK characters, ASCII letters and digits are
    untouched.  (Cleanup only: stray semicolon and redundant u-prefixes
    removed; pattern and behavior unchanged.)
    """
    pat_block = '[^\u4e00-\u9fff0-9a-zA-Z]+'
    # group 1 protects digit-punct-digit sequences from replacement
    pattern = '([0-9]+{0}[0-9]+)|{0}'.format(pat_block)
    res = re.sub(pattern, lambda x: x.group(1) if x.group(1) else " ", s)
    return res
 
def splitter(s):
    """Yield sentence-like chunks of *s*, splitting on !, ?, ',' and '。'.

    The terminating punctuation mark (if any) stays attached to its chunk;
    the separating ',' itself is dropped.
    """
    chunk_pattern = u'[^!?,。\!\?]+[!? 。\!\?]?'
    for sent in re.findall(chunk_pattern, s, flags=re.U):
        yield sent

def split_by_pun(s):
    """Return splitter(s) materialized as a list."""
    return list(splitter(s))
 
def generate_subtitle_image_from_dict(name_hash, sub_dict):
    """Render one subtitle PNG per entry of *sub_dict*, named by its 'index'."""
    for entry in sub_dict:
        target = dir_subtitle + name_hash + '/' + str(entry['index']) + '.png'
        txt2image(entry['content'], target)
 
def generate_subtitle_image(name_hash, text_content):
    """Render subtitle PNGs for each sentence of each paragraph.

    Returns one list per paragraph, each holding
    ``{"count": <char count>, "path": <png path>}`` dicts in sentence order.
    PNGs are named ``<paragraph idx><sentence idx>.png`` (indices simply
    concatenated, as before).
    """
    img_list = []
    for idx, paragraph in enumerate(text_content):
        entries = []
        for inner_idx, sentence in enumerate(split_by_pun(paragraph)):
            sv_path = dir_subtitle + name_hash + '/' + str(idx) + str(inner_idx) + '.png'
            txt2image(sentence, sv_path)
            entries.append({"count": len(sentence), "path": sv_path})
        img_list.append(entries)
    return img_list
 
def generate_subtitle_image_ENG(name_hash, text_content):
    """Render one English subtitle PNG per paragraph; return the PNG paths."""
    paths = []
    for idx, sub in enumerate(text_content):
        sv_path = dir_subtitle + name_hash + '/' + str(idx) + '.png'
        txt2image(sub, sv_path, lang='eng')
        paths.append(sv_path)
    return paths
 
def video_writer_init(path):
    """Create an FFmpegWriter configured for 1280x720 @30fps H.264 video
    with 44.1 kHz stereo AAC audio (both at 3 Mbit/s).  Caller must Open() it.
    """
    writer = openshot.FFmpegWriter(path)
    writer.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    writer.SetVideoOptions(
        True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
        openshot.Fraction(1, 1), False, False, 3000000)
    return writer
 
def video_gen(name_hash, name, text_content, image_urls, multiLang, avatar):
    """End-to-end long-article video build.

    Two-pass pipeline:
      1. Prepare assets, render anchor clips remotely, then assemble a "raw"
         timeline (logo intro, speech audio, outro, looping background,
         title card) and write tmp_video/<hash>raw.mp4.
      2. Run autosub on the raw video, align/split the recognized script,
         render subtitle PNGs, then rebuild the timeline on top of the raw
         video with subtitles, per-sentence images and the chroma-keyed
         anchor, writing tmp_video/<hash>.mp4.

    Layer convention: 2 = background/raw video, 3 = images, 4 = logos/anchor,
    6 = subtitles.
    """
    file_prepare_long(name, name_hash, text_content,image_urls,multiLang)

    for fname in range(len(text_content)):
        call_anchor(name_hash+"/"+str(fname),avatar)
    print('called............................................')
    ck=cKey(0,254,0,270)          # green-screen key for background footage
    ck_anchor=cKey(0,255,1,320)   # slightly different key for the anchor clip
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0               # running position (seconds) on the timeline
    # --- pass 1: raw video ------------------------------------------------
    LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
    LOGO_OP.Open()         # Open the reader
    head_duration = LOGO_OP.info.duration
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP,layer=4,position=0,end=head_duration
                    ,location_y=-0.03,scale_x=0.8,scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head,layer=2,position=0,end=LOGO_OP.info.duration,ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += head_duration
    bg_head.Close()
    LOGO_OP.Close()

    # Only the first anchor/speech pair (index 0) is placed — assumes
    # text_content effectively holds one combined article; TODO confirm.
    anchor = openshot.FFmpegReader(dir_anchor+name_hash+"/0.mp4")
    anchor.Open()
    #anchor_clip = video_photo_clip(vid=anchor,layer=4,scale_x=0.65,scale_y=0.65,
    #        location_x=0.35,location_y=0.25,position=main_timer, end=anchor.info.duration,ck=ck_anchor,audio=False)
    #t.AddClip(anchor_clip)
    speech = openshot.FFmpegReader(dir_sound+name_hash+"/0.mp3")
    speech.Open()
    speech_clip = openshot.Clip(speech)
    speech_clip.Position(main_timer)
    # speech trimmed to the anchor clip's length so audio and video agree
    speech_clip.End(anchor.info.duration)
    t.AddClip(speech_clip)
    main_timer += anchor.info.duration
    anchor.Close()
    speech.Close()

    LOGO_ED = openshot.FFmpegReader(dir_video+"LOGO_ED.avi")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED,layer=4,position=main_timer,end=LOGO_ED.info.duration
                    ,location_x=0.005,location_y=-0.031, scale_x=0.8,scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    main_timer += LOGO_ED.info.duration
    LOGO_ED.Close()

    # Tile the looping background under the whole timeline: full repeats
    # plus one partial clip for the remainder.
    bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg.Open()
    bg_times = math.floor(main_timer/bg.info.duration)
    left_time = (main_timer) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration   # background starts after the intro section
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx],layer=2,position=bg_timer,end=bg_list[idx].info.duration,ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left,layer=2,position=bg_timer,end=left_time,ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    title = openshot.QtImageReader(dir_title+name_hash+".png")
    title.Open()         # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4,location_x=-0.047, location_y=0.801,position=0,end=head_duration+main_timer)
    t.AddClip(title_clip)
    # render pass 1
    w = video_writer_init(tmp_video_dir+name_hash+"raw.mp4")
    w.Open()
    frames = int(t.info.fps)*int(main_timer)
    for n in range(frames):
        f=t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    print(name+"RAW DONE : www.choozmo.com:8168/"+tmp_video_dir+name_hash+"raw.mp4")
    #start adding sub

    #add sub
    # --- pass 2: speech recognition + subtitle/image overlay ---------------
    Ctr_Autosub.init()
    Ctr_Autosub.generate_subtitles(tmp_video_dir+name_hash+"raw.mp4",'zh',listener_progress,output=tmp_video_dir+name_hash+"script.txt",concurrency=DEFAULT_CONCURRENCY,subtitle_file_format=DEFAULT_SUBTITLE_FORMAT)

    # align recognized cues to the first paragraph's sentences
    sub_dict = parse_script(tmp_video_dir+name_hash+"script.txt",split_by_pun(text_content[0]))
    for subd in sub_dict:
        print(subd)

    generate_subtitle_image_from_dict(name_hash, sub_dict)
    #sv_path = dir_subtitle + name_hash + '/' + str(script['index'])+'.png'
    # fresh timeline on top of the pass-1 render
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    raw = openshot.FFmpegReader(tmp_video_dir+name_hash+"raw.mp4")
    raw.Open()
    raw_clip = video_photo_clip(vid=raw,layer=2,position=0, end=raw.info.duration)
    t.AddClip(raw_clip)

    # subtitle overlays, one per chunk, timed by parse_script
    sub_img_list = [None] * len(sub_dict)
    sub_clip_list = [None] * len(sub_dict)
    for sub_obj in sub_dict:
        idx = int(sub_obj['index'])
        sub_img_list[idx] = openshot.QtImageReader(dir_subtitle + name_hash + '/' + str(idx)+'.png')
        sub_img_list[idx].Open()
        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6,location_x=0.069, location_y=0.89,position=sub_obj['start'],end=math.ceil(sub_obj['duration']))
        t.AddClip(sub_clip_list[idx])
        sub_img_list[idx].Close()
    tp = parser()
    # per-image display windows derived from the subtitle timing
    img_dict_ls = tp.image_clip_info(sub_dict)
    #if 'image_idx' in dic:
    #            new_dic['image_obj'] = {'start':dic['start'],'idx':dic['image_idx']}
    img_clip_list = [None]*len(listdir(dir_photo+name_hash))
    img_list = [None]*len(img_clip_list)
    #for p in listdir(dir_photo+name_hash):
    #    p_idx = int(p)-1
    #    img_list[p_idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+p)
    #    img_list[p_idx].Open()
    #    img_clip_list[p_idx] = video_photo_clip(vid=img_list[p_idx],layer=3
    #            ,scale_x=0.81,scale_y=0.68,location_y=-0.03,position=img_dict_ls[int(p)+1]['start'],end=img_dict_ls[int(p)+1]['duration'],audio=False)
    #    t.AddClip(img_clip_list[p_idx])
    #    img_list[p_idx].Close()
    img_file_ls = listdir(dir_photo+name_hash)
    print(img_file_ls)
    print(img_dict_ls)
    # NOTE(review): listdir order is arbitrary and is zipped positionally with
    # img_dict_ls — verify filenames ("1.jpg", "2.mp4", ...) sort as intended.
    for img_idx in range(len(img_file_ls)):
        img_list[img_idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+img_file_ls[img_idx])
        img_list[img_idx].Open()
        img_clip_list[img_idx] = video_photo_clip(vid=img_list[img_idx],layer=3
                ,scale_x=0.81,scale_y=0.68,location_y=-0.03,position=img_dict_ls[img_idx]['start'],end=img_dict_ls[img_idx]['duration'],audio=False)
        t.AddClip(img_clip_list[img_idx])
        img_list[img_idx].Close()
    # chroma-keyed anchor overlay (video only; audio came from pass 1)
    anchor = openshot.FFmpegReader(dir_anchor+name_hash+"/0.mp4")
    anchor.Open()
    anchor_clip = video_photo_clip(vid=anchor,layer=4,scale_x=0.65,scale_y=0.65,
            location_x=0.35,location_y=0.25,position=head_duration, end=anchor.info.duration,ck=ck_anchor,audio=False)
    t.AddClip(anchor_clip)
    # render pass 2 (final)
    w = video_writer_init(tmp_video_dir+name_hash+".mp4")
    w.Open()
    frames = int(t.info.fps)*int(main_timer)
    for n in range(frames):
        f=t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    os.remove(tmp_video_dir+name_hash+"raw.mp4")
    os.remove(tmp_video_dir+name_hash+"script.txt")
    # NOTE(review): message advertises video_sub_folder/<hash>raw.mp4, but the
    # file just written (and kept) is tmp_video_dir/<hash>.mp4 — confirm the
    # intended publish path.
    print(name+"ALL DONE : www.choozmo.com:8168/"+video_sub_folder+name_hash+"raw.mp4")
 
def anchor_video_v2(name_hash,name,text_content, image_urls,multiLang,avatar):
    """Render the standard anchor news video for one job.

    Pipeline: prepare assets (speech mp3s, photos, title card), render one
    talking-anchor clip per text segment, compose everything on an OpenShot
    timeline (opening logo, per-segment photo + anchor + audio + subtitle
    images, ending logo, looping chroma-keyed background, title overlay),
    then encode the result to tmp_video_dir/<name_hash>.mp4.

    Parameters:
        name_hash    (str)  : unique job id; names all per-job asset folders.
        name         (str)  : human-readable job name (logging only).
        text_content (list) : spoken text, one entry per segment.
        image_urls   (list) : one source image per segment.
        multiLang           : language flag forwarded to file_prepare.
        avatar              : anchor avatar id forwarded to call_anchor.
    """
    print(os.getcwd())
    print('sub image made')
    print(multiLang)
    # Download/convert all per-job assets (speech, photos, title card).
    file_prepare(name, name_hash, text_content,image_urls,multiLang)
    sub_list=generate_subtitle_image(name_hash,text_content)

    # Render one talking-anchor clip per text segment.
    for fname in range(len(text_content)):
        call_anchor(name_hash+"/"+str(fname),avatar)
        print('step finish')
    print('called............................................')
    ck=cKey(0,254,0,270)          # chroma key for background footage
    ck_anchor=cKey(0,255,1,320)   # chroma key for the anchor overlay
    #average layer level is 3
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0

    # ---- opening logo + head background ----
    LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
    LOGO_OP.Open()         # Open the reader
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP,layer=4,position=0,end=LOGO_OP.info.duration
                    ,location_y=-0.03,scale_x=0.8,scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head,layer=2,position=0,end=LOGO_OP.info.duration,ck=ck)
    t.AddClip(bg_head_clip)
    main_timer += LOGO_OP.info.duration
    head_duration = LOGO_OP.info.duration
    bg_head.Close()
    LOGO_OP.Close()

    # ---- per-segment clips ----
    clip_duration=0
    photo_clip_list = [None]*len(text_content)
    img_list = [None]*len(text_content)
    anchor_clip_list = [None] * len(text_content)
    anchor_list = [None] * len(text_content)
    audio_clip_list = [None] * len(text_content)
    audio_list = [None] * len(text_content)
    sub_clip_list = [None] * len(text_content)
    sub_img_list = [None] * len(text_content)

    idx = 0
    # NOTE(review): listdir order is arbitrary; the photo-to-segment pairing
    # relies on it matching segment order -- consider sorting numerically.
    for p in listdir(dir_photo+name_hash):
        # Anchor overlay (video only; speech comes from the mp3 below).
        anchor_list[idx] = openshot.FFmpegReader(dir_anchor+name_hash+"/"+str(idx)+".mp4")
        clip_duration = anchor_list[idx].info.duration
        anchor_list[idx].Open()
        anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx],layer=4,scale_x=0.65,scale_y=0.65,
                location_x=0.35,location_y=0.25,position=main_timer, end=clip_duration,ck=ck_anchor,audio=False)
        t.AddClip(anchor_clip_list[idx])
        # Background photo for this segment.
        img_list[idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+p)
        img_list[idx].Open()
        photo_clip_list[idx] = video_photo_clip(vid=img_list[idx],layer=3
                ,scale_x=0.81,scale_y=0.68,location_y=-0.03,position=main_timer,end=clip_duration,audio=False)
        t.AddClip(photo_clip_list[idx])
        # Speech audio for this segment.
        audio_list[idx] = openshot.FFmpegReader(dir_sound+name_hash+"/"+str(idx)+".mp3")
        audio_list[idx].Open()
        audio_clip_list[idx] = openshot.Clip(audio_list[idx])
        audio_clip_list[idx].Position(main_timer)
        audio_clip_list[idx].End(clip_duration)
        t.AddClip(audio_clip_list[idx])
        # Close readers once their clips are on the timeline.
        # (Removed a duplicated img_list[idx].Close() from the original.)
        img_list[idx].Close()
        anchor_list[idx].Close()
        audio_list[idx].Close()

        # Subtitle images: each image shows for ~0.205 s per character.
        sub_img_list[idx] = [None] * len(sub_list[idx])
        sub_clip_list[idx] = [None] * len(sub_list[idx])
        sub_timer = 0
        for sub_idx in range(len(sub_list[idx])):
            sub_img_list[idx][sub_idx] = openshot.QtImageReader(sub_list[idx][sub_idx]['path'])
            sub_img_list[idx][sub_idx].Open()
            sub_duration = 0.205*sub_list[idx][sub_idx]['count']
            sub_clip_list[idx][sub_idx] = video_photo_clip(vid=sub_img_list[idx][sub_idx], layer=6,location_x=0.069, location_y=0.89,position=main_timer+sub_timer,end=sub_duration)
            t.AddClip(sub_clip_list[idx][sub_idx])
            sub_img_list[idx][sub_idx].Close()
            sub_timer += sub_duration
            print(sub_list[idx][sub_idx]['path'])
        main_timer += clip_duration
        idx+=1

    # ---- ending logo ----
    LOGO_ED = openshot.FFmpegReader(dir_video+"LOGO_ED.avi")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED,layer=4,position=main_timer,end=LOGO_ED.info.duration+2
                    ,location_x=0.005,location_y=-0.031
                    ,scale_x=0.8,scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    ED_duration = LOGO_ED.info.duration
    LOGO_ED.Close()

    # ---- looping body background ----
    bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg.Open()
    # BUGFIX: divide the WHOLE remaining runtime by the loop length.
    # The original `main_timer+ED_duration/bg.info.duration` divided only
    # ED_duration (operator precedence), looping the background far too many
    # times; the modulo on the next line shows the intended grouping.
    bg_times = math.floor((main_timer+ED_duration)/bg.info.duration)
    left_time = (main_timer+ED_duration) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx],layer=2,position=bg_timer
                ,end=bg_list[idx].info.duration,ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    # Partial loop iteration to cover the remainder.
    bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left,layer=2,position=bg_timer,end=left_time,ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    # Title card overlay.
    title = openshot.QtImageReader(dir_title+name_hash+".png")
    title.Open()         # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4,location_x=-0.047, location_y=0.801,position=0,end=head_duration+main_timer)
    t.AddClip(title_clip)
    ####start building
    w = openshot.FFmpegWriter(tmp_video_dir+name_hash+".mp4")
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
        openshot.Fraction(1, 1), False, False, 3000000)
    w.Open()

    #may change duration into t.info.duration
    # NOTE(review): head_duration was already added into main_timer above, so
    # this renders head_duration seconds of extra trailing frames -- confirm.
    frames = int(t.info.fps)*int(head_duration+main_timer+ED_duration)
    for n in range(frames):
        f=t.GetFrame(n)
        w.WriteFrame(f)

    #notify_group(name+"的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
    t.Close()
    w.Close()
    print("video at : www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
 
def anchor_video_eng(name_hash,name,text_content, image_urls,sub_titles,avatar):
    """Render the English-variant anchor video for one job.

    Same pipeline as anchor_video_v2, except: subtitles arrive pre-rendered
    via `sub_titles` (one image per segment, displayed for the whole
    segment) and the ending logo is the English version (ED_ENG.mp4).
    Output is encoded to tmp_video_dir/<name_hash>.mp4.

    Parameters:
        name_hash    (str)  : unique job id; names all per-job asset folders.
        name         (str)  : human-readable job name (logging only).
        text_content (list) : spoken text, one entry per segment.
        image_urls   (list) : one source image per segment.
        sub_titles          : subtitle text forwarded to generate_subtitle_image_ENG.
        avatar              : anchor avatar id forwarded to call_anchor.
    """
    file_prepare(name, name_hash, text_content,image_urls,'eng')
    sub_list=generate_subtitle_image_ENG(name_hash,sub_titles)

    # Render one talking-anchor clip per text segment.
    for fname in range(len(text_content)):
        call_anchor(name_hash+"/"+str(fname),avatar)
        print('step finish')
    print('called............................................')
    ck=cKey(0,254,0,270)          # chroma key for background footage
    ck_anchor=cKey(0,255,1,320)   # chroma key for the anchor overlay
    #average layer level is 3
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0
    #add logo
    LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
    LOGO_OP.Open()         # Open the reader
    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP,layer=4,position=0,end=LOGO_OP.info.duration
                    ,location_y=-0.03,scale_x=0.8,scale_y=0.704)
    t.AddClip(LOGO_OP_clip)
    #add background video  (head is different)
    bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
    bg_head.Open()
    bg_head_clip = video_photo_clip(vid=bg_head,layer=2,position=0,end=LOGO_OP.info.duration,ck=ck)
    t.AddClip(bg_head_clip)

    main_timer += LOGO_OP.info.duration
    head_duration = LOGO_OP.info.duration
    bg_head.Close()
    LOGO_OP.Close()
    #prepare empty list
    clip_duration=0
    photo_clip_list = [None]*len(text_content)
    img_list = [None]*len(text_content)
    anchor_clip_list = [None] * len(text_content)
    anchor_list = [None] * len(text_content)
    audio_clip_list = [None] * len(text_content)
    audio_list = [None] * len(text_content)
    sub_clip_list = [None] * len(text_content)
    #openshot image holder
    sub_img_list = [None] * len(text_content)

    idx = 0
    # NOTE(review): listdir order is arbitrary; the photo-to-segment pairing
    # relies on it matching segment order -- consider sorting numerically.
    for p in listdir(dir_photo+name_hash):
        # Anchor overlay (video only; speech comes from the mp3 below).
        anchor_list[idx] = openshot.FFmpegReader(dir_anchor+name_hash+"/"+str(idx)+".mp4")
        clip_duration = anchor_list[idx].info.duration
        anchor_list[idx].Open()
        anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx],layer=4,scale_x=0.65,scale_y=0.65,
                location_x=0.35,location_y=0.25,position=main_timer, end=clip_duration,ck=ck_anchor,audio=False)
        t.AddClip(anchor_clip_list[idx])
        #insert image
        img_list[idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+p)
        img_list[idx].Open()
        photo_clip_list[idx] = video_photo_clip(vid=img_list[idx],layer=3
                ,scale_x=0.81,scale_y=0.68,location_y=-0.03,position=main_timer,end=clip_duration,audio=False)
        t.AddClip(photo_clip_list[idx])
        #insert audio (speech)
        audio_list[idx] = openshot.FFmpegReader(dir_sound+name_hash+"/"+str(idx)+".mp3")
        audio_list[idx].Open()
        audio_clip_list[idx] = openshot.Clip(audio_list[idx])
        audio_clip_list[idx].Position(main_timer)
        audio_clip_list[idx].End(clip_duration)
        t.AddClip(audio_clip_list[idx])
        #insert subtitle (one image covering the whole segment)
        sub_img_list[idx] = openshot.QtImageReader(sub_list[idx])
        sub_img_list[idx].Open()
        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6,location_x=0.069, location_y=0.89,position=main_timer,end=clip_duration)
        t.AddClip(sub_clip_list[idx])
        # Close readers once their clips are on the timeline.
        # (Removed a duplicated img_list[idx].Close() from the original.)
        img_list[idx].Close()
        anchor_list[idx].Close()
        audio_list[idx].Close()
        sub_img_list[idx].Close()

        main_timer += clip_duration
        idx+=1

    # ---- ending logo (English version) ----
    LOGO_ED = openshot.FFmpegReader(dir_video+"ED_ENG.mp4")
    LOGO_ED.Open()
    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED,layer=4,position=main_timer,end=LOGO_ED.info.duration+2
                    ,location_x=0.005,location_y=-0.031
                    ,scale_x=0.8,scale_y=0.6825)
    t.AddClip(LOGO_ED_clip)
    ED_duration = LOGO_ED.info.duration
    LOGO_ED.Close()

    # ---- looping body background ----
    bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg.Open()
    # BUGFIX: parenthesize the sum before dividing (matches the modulo on the
    # next line); the original divided only ED_duration by the loop length.
    bg_times = math.floor((main_timer+ED_duration)/bg.info.duration)
    left_time = (main_timer+ED_duration) % bg.info.duration
    bg_clip_list = [None] * bg_times
    bg_list = [None] * bg_times
    bg.Close()
    bg_timer = head_duration
    for idx in range(bg_times):
        bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
        bg_list[idx].Open()
        bg_clip_list[idx] = video_photo_clip(bg_list[idx],layer=2,position=bg_timer
                ,end=bg_list[idx].info.duration,ck=ck)
        t.AddClip(bg_clip_list[idx])
        bg_timer += bg_list[idx].info.duration
        bg_list[idx].Close()
    # Partial loop iteration to cover the remainder.
    bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
    bg_left.Open()
    bg_left_clip = video_photo_clip(bg_left,layer=2,position=bg_timer,end=left_time,ck=ck)
    t.AddClip(bg_left_clip)
    bg_left.Close()
    # Title card overlay.
    title = openshot.QtImageReader(dir_title+name_hash+".png")
    title.Open()         # Open the reader
    title_clip = video_photo_clip(vid=title, layer=4,location_x=-0.047, location_y=0.801,position=0,end=head_duration+main_timer)
    t.AddClip(title_clip)
    ####start building
    w = openshot.FFmpegWriter(tmp_video_dir+name_hash+".mp4")
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
        openshot.Fraction(1, 1), False, False, 3000000)
    w.Open()

    #may change duration into t.info.duration
    # NOTE(review): head_duration was already added into main_timer above, so
    # this renders head_duration seconds of extra trailing frames -- confirm.
    frames = int(t.info.fps)*int(head_duration+main_timer+ED_duration)
    for n in range(frames):
        f=t.GetFrame(n)
        w.WriteFrame(f)

    #notify_group(name+"(ENG)的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
    t.Close()
    w.Close()
    print("video at : www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
    #line notifs
 
- import pyttsx3
 
def make_speech(text):
    """Synthesize `text` to /app/speech.mp3 with the system TTS engine.

    Selects the 'Mandarin' voice and blocks until the audio file has been
    fully written (runAndWait drains the engine's queue).
    """
    tts = pyttsx3.init()
    tts.setProperty('voice', 'Mandarin')
    tts.save_to_file(text, '/app/speech.mp3')
    tts.runAndWait()
 
-     
 
class video_service(rpyc.Service):
    """RPyC service exposing the video-generation pipeline to remote clients.

    Each exposed_* method is reachable from an rpyc client as
    conn.root.call_* / conn.root.make_speech; calls run synchronously and
    return only after the video or speech file has been produced.
    """
    def exposed_call_video(self,name_hash,name,text_content, image_urls,multiLang,avatar):
        # Standard anchor video (v2 layout).
        print('ML:'+str(multiLang))
        anchor_video_v2(name_hash,name,text_content, image_urls,multiLang,avatar)
    def exposed_call_video_eng(self,name_hash,name,text_content, image_urls,sub_titles,avatar):
        # English variant: caller supplies the subtitle strings directly.
        anchor_video_eng(name_hash,name,text_content, image_urls,sub_titles,avatar)
    def exposed_call_video_gen(self,name_hash,name,text_content, image_urls,multiLang,avatar):
        print('ML:'+str(multiLang))#this is long video version,
        anchor_video_gen = video_gen  # NOTE(review): video_gen is defined earlier in this file -- confirm signature
        video_gen(name_hash,name,text_content, image_urls,multiLang,avatar)
    def exposed_make_speech(self,text):
        # One-off TTS rendering to /app/speech.mp3.
        make_speech(text)
 
from rpyc.utils.server import ThreadedServer
# Serve video_service on port 8858; start() blocks forever handling clients.
# NOTE(review): this runs at import time -- consider guarding with
# `if __name__ == "__main__":` if the module is ever imported elsewhere.
t = ThreadedServer(video_service, port=8858)
print('service started')
t.start()
 
 
  |