@@ -0,0 +1,1104 @@
+from os import listdir
+from os.path import isfile, isdir, join
+import openshot
+import threading
+import zhtts
+import os
+import urllib
+from typing import List
+import requests
+from pydantic import BaseModel
+from bs4 import BeautifulSoup
+from PIL import Image,ImageDraw,ImageFont
+import pyttsx3
+import rpyc
+import random
+import re
+import time
+import math
+import dataset
+from datetime import datetime
+from gtts import gTTS
+import ffmpy
+from difflib import SequenceMatcher
+import difflib
+from autosub import DEFAULT_CONCURRENCY
+from autosub import DEFAULT_SUBTITLE_FORMAT
+from pytranscriber.control.ctr_main import Ctr_Main
+from pytranscriber.control.ctr_autosub import Ctr_Autosub
+import multiprocessing
+from itertools import groupby
+from operator import itemgetter
+from openUtil.parser import parser
+import pandas as pd
+import numpy as np
+import jieba
+import jieba.posseg as pseg
+import urllib.request
+import librosa
+from pydub import AudioSegment
+from pydub.silence import split_on_silence
+import itertools
+from hakkaUtil import *
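+
+# Working directories for intermediate assets (speech tracks, downloaded photos,
+# text files, subtitle/title images, rendered anchor clips and temporary videos);
+# make_dir() creates the ones listed in dir_list on demand.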
+dir_sound = 'mp3_track/'
+dir_photo = 'photo/'
+dir_text = 'text_file/'
+dir_video = 'video_material/'
+dir_title = 'title/'
+dir_subtitle = 'subtitle/'
+dir_anchor = 'anchor_raw/'
+tmp_video_dir = 'tmp_video/'
+video_sub_folder = 'ai_anchor_video/'
+
+dir_list = [dir_sound,dir_photo,dir_text,dir_video,dir_title,dir_subtitle,dir_anchor,tmp_video_dir]
+
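+# Broadcast a message to each of the hard-coded LINE Notify tokens below.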
+def notify_group(msg):
+    glist=['7vilzohcyQMPLfAMRloUawiTV4vtusZhxv8Czo7AJX8','WekCRfnAirSiSxALiD6gcm0B56EejsoK89zFbIaiZQD','1dbtJHbWVbrooXmQqc4r8OyRWDryjD4TMJ6DiDsdgsX','HOB1kVNgIb81tTB4Ort1BfhVp9GFo6NlToMQg88vEhh']
+    for gid in glist:
+        headers = {
+            "Authorization": "Bearer " + gid,
+            "Content-Type": "application/x-www-form-urlencoded"
+        }
+        params = {"message": msg}
+        r = requests.post("https://notify-api.line.me/api/notify",headers=headers, params=params)
+
+def cKey(r,g,b,fuzz):
+    col=openshot.Color()
+    col.red=openshot.Keyframe(r)
+    col.green=openshot.Keyframe(g)
+    col.blue=openshot.Keyframe(b)
+    return openshot.ChromaKey(col, openshot.Keyframe(fuzz))
+
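+# Wrap a reader in an openshot.Clip with layer, timing, scale and position keyframes,
+# an optional chroma-key effect, and optional audio muting.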
+def video_photo_clip(vid=None,layer=None, position=None, end=None
+        ,scale_x=1,scale_y=1,location_x=0,location_y=0,ck=None,audio=True):
+    clip = openshot.Clip(vid)
+    clip.Layer(layer)
+    clip.Position(position)
+    clip.End(end)
+    clip.scale_x=openshot.Keyframe(scale_x)
+    clip.scale_y=openshot.Keyframe(scale_y)
+    clip.location_x=openshot.Keyframe(location_x)
+    clip.location_y=openshot.Keyframe(location_y)
+
+    if ck!=None:
+        clip.AddEffect(ck)
+    if audio==True:
+        clip.has_audio=openshot.Keyframe(1)
+    else:
+        clip.has_audio=openshot.Keyframe(0)
+    return clip
+
+def listener_progress(string, percent):
+    # Progress callback required by autosub's generate_subtitles(); intentionally a no-op.
+    pass
+
+def myunichchar(unicode_char):
+    mb_string = unicode_char.encode('big5')
+    try:
+        # Python 2: indexing bytes yields 1-char strings, so ord() is needed.
+        unicode_char = unichr(ord(mb_string[0]) << 8 | ord(mb_string[1]))
+    except (NameError, TypeError):
+        # Python 3: unichr() does not exist and indexing bytes already yields ints.
+        unicode_char = chr(mb_string[0] << 8 | mb_string[1])
+    return unicode_char
+
+def get_url_type(url):
+    print('---------------------------------------------')
+    print(url)
+    req = urllib.request.Request(url, method='HEAD', headers={'User-Agent': 'Mozilla/5.0'})
+    r = urllib.request.urlopen(req)
+    contentType = r.getheader('Content-Type')
+    print(contentType)
+    print('-------------------------------------------------')
+    return contentType
+
+def make_dir(name_hash):
+    for direct in dir_list:
+        if not os.path.isdir(direct):
+            os.mkdir(direct)
+    # per-job sub-directories keyed by the job's name_hash
+    for direct in [dir_photo, dir_text, dir_sound, dir_anchor, dir_subtitle]:
+        try:
+            os.mkdir(direct+name_hash)
+        except FileExistsError:
+            print("~~~~~~Warning~~~~~~~~~Directory " , direct+name_hash , " already exists")
+
+def hakkaTTS(mp3_path,ch_sentence,gender):
+    download = False  # set to True only when the word audio files still need to be downloaded
+    hakka_100 = import_hakka_100()
+    word_data,multi_sound = import_data()
+    if download:
+        download_mp3(word_data,multi_sound)
+        download_hakka_100(hakka_100)
+    ch_word_list = list(itertools.chain(*word_data['華語詞義集'].tolist())) + hakka_100.chinese_clean.tolist()
+    import_jieba_userdict(ch_word_list=ch_word_list, userDict_path='userDict.txt')
+    gen_hakka_tts(word_data,multi_sound,hakka_100,ch_sentence,gender,mp3_path)
+
+
+def file_prepare(name, name_hash,text_content,image_urls,multiLang,lang='zh'):
+    make_dir(name_hash)
+    img_num = 1
+    for imgu in image_urls:
+        if get_url_type(imgu) =='video/mp4':
+            r=requests.get(imgu)
+            f=open(dir_photo+name_hash+"/"+str(img_num)+".mp4",'wb')
+            for chunk in r.iter_content(chunk_size=255):
+                if chunk:
+                    f.write(chunk)
+            f.close()
+        else:
+            im = Image.open(requests.get(imgu, stream=True).raw)
+            im= im.convert("RGB")
+            im.save(dir_photo+name_hash+"/"+str(img_num)+".jpg")
+        img_num+=1
+    #save text
+    txt_idx=0
+    for txt in text_content:
+        text_file = open(dir_text+name_hash+"/"+str(txt_idx)+".txt", "w")
+        text_file.write(txt)
+        text_file.close()
+        txt_idx+=1
+    print("text file made")
+    #make mp3
+    txt_idx = 0
+    for txt in text_content:
+        if multiLang==3:
+            hakkaTTS(dir_sound+name_hash+"/"+str(txt_idx)+".mp3",txt,0)
+        elif multiLang==4:
+            hakkaTTS(dir_sound+name_hash+"/"+str(txt_idx)+".mp3",txt,1)
+        else:
+            if lang!='zh' or multiLang==1:
+                if lang!='zh':
+                    tts = gTTS(txt)
+                    tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
+                else:
+                    tts = gTTS(txt,lang='zh-tw')
+                    tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
+                #speed up
+                ff = ffmpy.FFmpeg(inputs={dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3": None}
+                    , outputs={dir_sound+name_hash+"/"+str(txt_idx)+".mp3": ["-filter:a", "atempo=1.2"]})
+                ff.run()
+                os.remove(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
+            else:
+                print('use zhtts')
+                tts = zhtts.TTS()
+                tts.text2wav(txt,dir_sound+name_hash+"/"+str(txt_idx)+".mp3")
+        txt_idx+=1
+    print("mp3 file made")
+    #make title as image
+    txt2image_title(name, dir_title+name_hash+".png",lang)
+
+
+def file_prepare_long(name, name_hash,text_content,image_urls,multiLang,lang='zh'):
+    make_dir(name_hash)
+    img_num = 1
+    for imgu in image_urls:
+        if get_url_type(imgu) =='video/mp4':
+            r=requests.get(imgu)
+            f=open(dir_photo+name_hash+"/"+str(img_num)+".mp4",'wb')
+            for chunk in r.iter_content(chunk_size=255):
+                if chunk:
+                    f.write(chunk)
+            f.close()
+        else:
+            im = Image.open(requests.get(imgu, stream=True).raw)
+            im= im.convert("RGB")
+            im.save(dir_photo+name_hash+"/"+str(img_num)+".jpg")
+        img_num+=1
+
+    #make mp3
+    text_parser = parser()
+    txt_idx = 0
+    for txt in text_content:
+        rep_list = text_parser.replace_list(txt)
+        for reptxt in rep_list:
+            txt = txt.replace(reptxt,'')
+        if lang!='zh' or multiLang==1:
+            if lang!='zh':
+                tts = gTTS(txt)
+                tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
+            else:
+                tts = gTTS(txt,lang='zh-tw')
+                tts.save(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
+            #speed up
+            ff = ffmpy.FFmpeg(inputs={dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3": None}
+                , outputs={dir_sound+name_hash+"/"+str(txt_idx)+".mp3": ["-filter:a", "atempo=1.2"]})
+            ff.run()
+            os.remove(dir_sound+name_hash+"/"+str(txt_idx)+"raw.mp3")
+        else:
+            print('use zhtts')
+            tts = zhtts.TTS()
+            tts.text2wav(txt,dir_sound+name_hash+"/"+str(txt_idx)+".mp3")
+        txt_idx+=1
+    print("mp3 file made")
+    #make title as image
+    txt2image_title(name, dir_title+name_hash+".png",lang)
+
+def txt2image(content, save_target,lang='zh'):
+    unicode_text = trim_punctuation(content)
+    font = ''
+    if lang=='zh':
+        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=38)
+    else:
+        font = ImageFont.truetype(font="font/arial.ttf", size=38)
+    text_width, text_height = font.getsize(unicode_text)
+    canvas = Image.new('RGBA', (700, 500), (255, 0, 0, 0) )
+    draw = ImageDraw.Draw(canvas)
+    text= unicode_text
+    draw.text((5,5), text, (255, 255, 0), font)
+    canvas.save(save_target, "PNG")
+
+def txt2image_title(content, save_target, lang='zh'):
+    unicode_text = trim_punctuation(content)
+    font = ''
+    if lang=='zh':
+        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=22)
+    else:
+        font = ImageFont.truetype(font="font/arial.ttf", size=22)
+    text_width, text_height = font.getsize(unicode_text)
+    canvas = Image.new('RGBA', (510, 500), (255, 0, 0, 0) )
+    draw = ImageDraw.Draw(canvas)
+    text= unicode_text
+    draw.text((5,5), text, (17, 41, 167), font)
+    canvas.save(save_target, "PNG")
+
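+# call_anchor: copy the generated speech track to the rendering host over rpyc,
+# trigger the avatar render script (p<avatar>.sh), poll until the result appears,
+# then copy the finished anchor clip back into anchor_raw/.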
+def call_anchor(fileName,avatar):
+    conn = rpyc.classic.connect("192.168.1.111",18812)
+    ros = conn.modules.os
+    rsys = conn.modules.sys
+    fr=open(dir_sound+fileName+".mp3",'rb')# voice
+    #warning!!! file may be replaced by another process
+    fw=conn.builtins.open('/tmp/output.mp3','wb')
+
+    while True:
+        b=fr.read(1024)
+        if b:
+            fw.write(b)
+        else:
+            break
+
+    fr.close()
+    fw.close()
+
+    val=random.randint(1000000,9999999)
+    ros.chdir('/home/jared/to_video')
+    ros.system('./p'+str(avatar)+'.sh '+str(val)+' &')
+
+    while True:
+        print('waiting...')
+        if ros.path.exists('/tmp/results/'+str(val)):
+            break
+        time.sleep(5)
+        print('waiting...')
+
+    fr=conn.builtins.open('/tmp/results/'+str(val)+'.mp4','rb')
+    fw=open(dir_anchor+fileName+".mp4",'wb')
+    while True:
+        b=fr.read(1024)
+        if b:
+            fw.write(b)
+        else:
+            break
+
+    fr.close()
+    fw.close()
+
+def syllable_count(word):
+    word = word.lower()
+    count = 0
+    vowels = "aeiouy"
+    if word[0] in vowels:
+        count += 1
+    for index in range(1, len(word)):
+        if word[index] in vowels and word[index - 1] not in vowels:
+            count += 1
+
+    if word.endswith("e"):
+        count -= 1
+    if count == 0:
+        count += 1
+    return count
+
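+# split_sentence: break a mixed Chinese/English string into subtitle-sized chunks
+# (roughly maxLen characters/syllables each) and estimate each chunk's share of the
+# total speech time from its syllable count.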
+def split_sentence(in_str, maxLen):
+    re.findall(r'[\u4e00-\u9fff]+', in_str)
+
+    zh_idx = []
+    eng_idx= []
+    for i in range(len(in_str)):
+        if in_str[i] > u'\u4e00' and in_str[i] < u'\u9fff':
+            zh_idx.append(i)
+        else:
+            eng_idx.append(i)
+
+    space_index = [m.start() for m in re.finditer(' ', in_str)]
+    for idx in space_index:
+        eng_idx.remove(idx)
+
+    eng_range_list = []
+    for k, g in groupby(enumerate(eng_idx), lambda ix : ix[0] - ix[1]):
+        eng_range = list(map(itemgetter(1), g))
+        eng_range_list.append(eng_range)
+
+    total_syllable = 0
+    for i in range(len(eng_range_list)):
+        total_syllable += (syllable_count(in_str[eng_range_list[i][0]:eng_range_list[i][-1]+1])+0.5)
+    for i in range(len(zh_idx)):
+        total_syllable+=1
+
+    #final chchchchchc[en][en][en]
+    #[en] is a vocabulary dict with occurence of image
+    zh_eng_idx_list = []
+    i = 0
+    while i < len(in_str):
+        if in_str[i]==' ':
+            i+=1
+        if i in zh_idx:
+            zh_eng_idx_list.append(i)
+            i+=1
+        if i in eng_idx:
+            for ls in eng_range_list:
+                if i in ls:
+                    zh_eng_idx_list.append(ls)
+                    i = ls[-1]+1
+                    break
+
+    zh_eng_dict_list = [{'content':'','time_ratio':0}]
+    idx = 0
+    current_len = 0
+    sen_idx = 0
+    while idx < len(zh_eng_idx_list):
+        str_from_idx = ''
+        sylla_cnt = 1
+        if type(zh_eng_idx_list[idx])==type([]):
+            str_from_idx = in_str[zh_eng_idx_list[idx][0]:zh_eng_idx_list[idx][-1]+1]+' '
+            sylla_cnt = syllable_count(str_from_idx)
+        else:
+            str_from_idx = in_str[zh_eng_idx_list[idx]]
+
+        if len(zh_eng_dict_list[sen_idx]['content'])+sylla_cnt>=maxLen:
+            zh_eng_dict_list[sen_idx]['time_ratio'] = current_len/total_syllable
+
+            zh_eng_dict_list.append({'content':'','time_ratio':0})
+            sen_idx+=1
+            current_len = 0
+        else:
+            current_len += sylla_cnt
+        zh_eng_dict_list[sen_idx]['content'] += str_from_idx
+        idx+=1
+
+    total_ratio = 0
+    for obj in zh_eng_dict_list:
+        total_ratio+=obj['time_ratio']
+    zh_eng_dict_list[-1]['time_ratio'] = 1-total_ratio
+    return zh_eng_dict_list
+
+def parse_script(file_path,gt_list):
+    with open(file_path, 'r',encoding="utf-8") as f:
+        raw_lines = [line.strip() for line in f]
+    lines = adjustSub_by_text_similarity(gt_list,raw_lines)
+    text_parser = parser()
+    #make dict
+    dict_list = []
+    for idx in range(len(lines)):
+        script={}
+        rep_ls = text_parser.replace_list(lines[idx])
+        line_content = lines[idx]
+        for reptxt in rep_ls:
+            line_content = line_content.replace(reptxt,'')
+        if len(rep_ls)!=0:
+            script['image_idx'] = int(rep_ls[0].replace('{','').replace('}',''))
+        script['content'] = line_content
+        time_raw = raw_lines[idx * 4 +1 ].split(' --> ')
+        start = time_raw[0].split(':')
+        stop = time_raw[1].split(':')
+        script['start'] = float(start[0])*3600 + float(start[1])*60 + float(start[2].replace(',','.'))
+        script['stop'] = float(stop[0])*3600 + float(stop[1])*60 + float(stop[2].replace(',','.'))
+        dict_list.append(script)
+
+    #merge duplicated sentences
+    skip_list = []
+    script_not_dup_list = []
+    for idx in range(len(dict_list)):
+        if idx not in skip_list:
+            dup_list = []
+            found = 0
+            for idx_inner in range(len(dict_list)):
+                if dict_list[idx_inner]['content'] == dict_list[idx]['content'] and idx <= idx_inner:
+                    dup_list.append(idx_inner)
+                    skip_list.append(idx_inner)
+                    found += 1
+                if found != 0 and dict_list[idx_inner]['content']!=dict_list[idx]['content'] and idx <= idx_inner:
+                    found = 0
+                    break
+
+            for dup_idx in dup_list:
+                if dup_idx == min(dup_list):
+                    dict_list[dup_idx]['type'] = 'lead_sentence'
+                else:
+                    dict_list[dup_idx]['type'] = 'duplicated'
+            dict_list[dup_list[0]]['stop'] = dict_list[dup_list[-1]]['stop']
+
+            if dict_list[idx]['type'] == 'lead_sentence':
+                script_not_dup_list.append(dict_list[idx])
+
+    new_idx = 0
+    splitted_dict = []
+    for dic in script_not_dup_list:
+        dic_idx = 0
+        accumulated_duration = 0
+        duration = dic['stop']-dic['start']
+
+        for sub_dic in split_sentence(dic['content'],13):
+            new_dic = {}
+            new_dic['index'] = new_idx
+            if 'image_idx' in dic:
+                new_dic['image_obj'] = {'start':dic['start'],'idx':dic['image_idx']}
+            new_idx+=1
+            ind_duration = duration * sub_dic['time_ratio']
+            new_dic['start'] = dic['start'] + accumulated_duration
+            accumulated_duration += ind_duration
+            new_dic['content'] = sub_dic['content']
+            new_dic['duration'] = ind_duration*0.7
+            splitted_dict.append(new_dic)
+    return splitted_dict
+
+
+def adjustSub_by_text_similarity(gts_in,gens_raw):
+    #call by value only
+    gts = gts_in[:]
+    text_parser = parser()
+    for i in range(len(gts)):
+        rep_ls = text_parser.replace_list(gts[i])
+        for reptxt in rep_ls:
+            gts[i] = gts[i].replace(reptxt,'')
+
+    gens = []
+    for idx in range(int((len(gens_raw)+1)/4)):
+        gens.append(gens_raw[idx*4+2])
+
+    combine2 = [''.join([i,j]) for i,j in zip(gts, gts[1:])]
+    combine3 = [''.join([i,j,k]) for i,j,k in zip(gts, gts[1:], gts[2:])]
+    alls = gts #+ combine2 + combine3
+    adjusted = [None]*len(gens)
+    duplicated_list = []
+    for idx in range(len(gens)):
+        match_text = difflib.get_close_matches(gens[idx], alls, cutoff=0.1)
+        if len(match_text) != 0:
+            if match_text[0] not in duplicated_list:
+                adjusted[idx] = match_text[0]
+                duplicated_list.append(match_text[0])
+            else:
+                if match_text[0] == adjusted[idx-1]:
+                    adjusted[idx] = match_text[0]
+                else:
+                    found = 0
+                    for mt in match_text:
+                        if mt not in duplicated_list:
+                            adjusted[idx] = mt
+                            found += 1
+                            break
+                    if found ==0:
+                        adjusted[idx] = ' '
+        else:
+            adjusted[idx] = ' '
+
+    combine2_tag = [''.join([i,j]) for i,j in zip(gts_in, gts_in[1:])]
+    combine3_tag = [''.join([i,j,k]) for i,j,k in zip(gts_in, gts_in[1:], gts_in[2:])]
+    alls_tag = gts_in #+ combine2_tag + combine3_tag
+
+    for idx in range(len(adjusted)):
+        match_text = difflib.get_close_matches(adjusted[idx], alls_tag, cutoff=0.1)
+        adjusted[idx] = match_text[0]
+    return adjusted
+
+def trim_punctuation(s):
+    pat_block = u'[^\u4e00-\u9fff0-9a-zA-Z]+'
+    pattern = u'([0-9]+{0}[0-9]+)|{0}'.format(pat_block)
+    res = re.sub(pattern, lambda x: x.group(1) if x.group(1) else u" " ,s)
+    return res
+
+def splitter(s):
+    for sent in re.findall(u'[^!?,。\!\?]+[!? 。\!\?]?', s, flags=re.U):
+        yield sent
+
+def split_by_pun(s):
+    res = list(splitter(s))
+    return res
+
+def generate_subtitle_image_from_dict(name_hash, sub_dict):
+    for script in sub_dict:
+        sv_path = dir_subtitle + name_hash + '/' + str(script['index'])+'.png'
+        sub = script['content']
+        txt2image(sub,sv_path)
+
+def generate_subtitle_image(name_hash,text_content):
+    img_list = [None]*len(text_content)
+    for idx in range(len(text_content)):
+        img_list[idx]=[]
+        senList = split_by_pun(text_content[idx])
+        for inner_idx in range(len(senList)):
+            sv_path = dir_subtitle + name_hash +'/'+str(idx)+ str(inner_idx) +'.png'
+            sub = senList[inner_idx]
+            txt2image(sub,sv_path)
+            clean_content = trim_punctuation(sub)
+
+            re.findall(r'[\u4e00-\u9fff]+', clean_content)
+
+            zh_idx = []
+            eng_idx= []
+            for i in range(len(clean_content)):
+                if clean_content[i] > u'\u4e00' and clean_content[i] < u'\u9fff':
+                    zh_idx.append(i)
+                else:
+                    eng_idx.append(i)
+
+            space_index = [m.start() for m in re.finditer(' ', clean_content)]
+            for s_idx in space_index:
+                eng_idx.remove(s_idx)
+
+            eng_range_list = []
+            for k, g in groupby(enumerate(eng_idx), lambda ix : ix[0] - ix[1]):
+                eng_range = list(map(itemgetter(1), g))
+                eng_range_list.append(eng_range)
+
+            total_syllable = 0
+            for i in range(len(eng_range_list)):
+                total_syllable += (syllable_count(clean_content[eng_range_list[i][0]:eng_range_list[i][-1]+1])+0.5)
+            for i in range(len(zh_idx)):
+                total_syllable+=1
+
+            img_list[idx]+=[{"count":total_syllable,"path":sv_path}]
+    return img_list
+
+def generate_subtitle_image_ENG(name_hash,text_content):
+    img_list = [None]*len(text_content)
+    for idx in range(len(text_content)):
+        sv_path = dir_subtitle + name_hash +'/'+str(idx)+'.png'
+        sub = text_content[idx]
+        txt2image(sub, sv_path,lang='eng')
+        img_list[idx] = sv_path
+    return img_list
+
+def video_writer_init(path):
+    w = openshot.FFmpegWriter(path)
+    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
+    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
+        openshot.Fraction(1, 1), False, False, 3000000)
+    return w
+
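+# video_gen: long-form pipeline. Render a raw video (logo, background, speech),
+# run autosub on it to obtain timed subtitles, then re-render with the subtitle
+# images, per-sentence photos and the anchor overlay composited on top.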
+def video_gen(name_hash,name,text_content, image_urls,multiLang,avatar):
+    file_prepare_long(name, name_hash, text_content,image_urls,multiLang)
+
+    for fname in range(len(text_content)):
+        call_anchor(name_hash+"/"+str(fname),avatar)
+    print('called............................................')
+    ck=cKey(0,254,0,270)
+    ck_anchor=cKey(0,255,1,320)
+    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
+    t.Open()
+    main_timer = 0
+    LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
+    LOGO_OP.Open() # Open the reader
+    head_duration = LOGO_OP.info.duration
+    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP,layer=4,position=0,end=head_duration
+        ,location_y=-0.03,scale_x=0.8,scale_y=0.704)
+    t.AddClip(LOGO_OP_clip)
+    bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
+    bg_head.Open()
+    bg_head_clip = video_photo_clip(vid=bg_head,layer=2,position=0,end=LOGO_OP.info.duration,ck=ck)
+    t.AddClip(bg_head_clip)
+    main_timer += head_duration
+    bg_head.Close()
+    LOGO_OP.Close()
+
+    anchor = openshot.FFmpegReader(dir_anchor+name_hash+"/0.mp4")
+    anchor.Open()
+    #anchor_clip = video_photo_clip(vid=anchor,layer=4,scale_x=0.65,scale_y=0.65,
+    #    location_x=0.35,location_y=0.25,position=main_timer, end=anchor.info.duration,ck=ck_anchor,audio=False)
+    #t.AddClip(anchor_clip)
+
+    speech = openshot.FFmpegReader(dir_sound+name_hash+"/0.mp3")
+    speech.Open()
+    speech_clip = openshot.Clip(speech)
+    speech_clip.Position(main_timer)
+    speech_clip.End(anchor.info.duration)
+    t.AddClip(speech_clip)
+    main_timer += anchor.info.duration
+    anchor.Close()
+    speech.Close()
+
+    LOGO_ED = openshot.FFmpegReader(dir_video+"LOGO_ED.avi")
+    LOGO_ED.Open()
+    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED,layer=4,position=main_timer,end=LOGO_ED.info.duration
+        ,location_x=0.005,location_y=-0.031, scale_x=0.8,scale_y=0.6825)
+    t.AddClip(LOGO_ED_clip)
+    main_timer += LOGO_ED.info.duration
+    LOGO_ED.Close()
+
+    bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
+    bg.Open()
+    bg_times = math.floor(main_timer/bg.info.duration)
+    left_time = (main_timer) % bg.info.duration
+    bg_clip_list = [None] * bg_times
+    bg_list = [None] * bg_times
+    bg.Close()
+    bg_timer = head_duration
+    for idx in range(bg_times):
+        bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
+        bg_list[idx].Open()
+        bg_clip_list[idx] = video_photo_clip(bg_list[idx],layer=2,position=bg_timer,end=bg_list[idx].info.duration,ck=ck)
+        t.AddClip(bg_clip_list[idx])
+        bg_timer += bg_list[idx].info.duration
+        bg_list[idx].Close()
+    bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
+    bg_left.Open()
+    bg_left_clip = video_photo_clip(bg_left,layer=2,position=bg_timer,end=left_time,ck=ck)
+    t.AddClip(bg_left_clip)
+    bg_left.Close()
+
+    title = openshot.QtImageReader(dir_title+name_hash+".png")
+    title.Open() # Open the reader
+    title_clip = video_photo_clip(vid=title, layer=4,location_x=-0.047, location_y=0.801,position=0,end=head_duration+main_timer)
+    t.AddClip(title_clip)
+
+    w = video_writer_init(tmp_video_dir+name_hash+"raw.mp4")
+    w.Open()
+    frames = int(t.info.fps)*int(main_timer)
+    for n in range(frames):
+        f=t.GetFrame(n)
+        w.WriteFrame(f)
+    t.Close()
+    w.Close()
+    print(name+"RAW DONE : www.choozmo.com:8168/"+tmp_video_dir+name_hash+"raw.mp4")
+    #start adding sub
+
+    #add sub
+    Ctr_Autosub.init()
+    Ctr_Autosub.generate_subtitles(tmp_video_dir+name_hash+"raw.mp4",'zh',listener_progress,output=tmp_video_dir+name_hash+"script.txt",concurrency=DEFAULT_CONCURRENCY,subtitle_file_format=DEFAULT_SUBTITLE_FORMAT)
+
+    sub_dict = parse_script(tmp_video_dir+name_hash+"script.txt",split_by_pun(text_content[0]))
+    for subd in sub_dict:
+        print(subd)
+
+    generate_subtitle_image_from_dict(name_hash, sub_dict)
+
+    #sv_path = dir_subtitle + name_hash + '/' + str(script['index'])+'.png'
+
+    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
+    t.Open()
+
+    raw = openshot.FFmpegReader(tmp_video_dir+name_hash+"raw.mp4")
+    raw.Open()
+    raw_clip = video_photo_clip(vid=raw,layer=2,position=0, end=raw.info.duration)
+    t.AddClip(raw_clip)
+
+    sub_img_list = [None] * len(sub_dict)
+    sub_clip_list = [None] * len(sub_dict)
+    for sub_obj in sub_dict:
+        idx = int(sub_obj['index'])
+        sub_img_list[idx] = openshot.QtImageReader(dir_subtitle + name_hash + '/' + str(idx)+'.png')
+        sub_img_list[idx].Open()
+        #if sub_obj['duration']>3:
+        #    print('warning')
+        #print('start:',sub_obj['start'],', duration :', sub_obj['duration'],' content',sub_obj['content'],'idx:',sub_obj['index'])
+
+        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6,location_x=0.069, location_y=0.89,position=sub_obj['start'],end=math.ceil(sub_obj['duration']))
+        t.AddClip(sub_clip_list[idx])
+        sub_img_list[idx].Close()
+
+    tp = parser()
+    img_dict_ls = tp.image_clip_info(sub_dict)
+    img_clip_list = [None]*len(listdir(dir_photo+name_hash))
+    img_list = [None]*len(img_clip_list)
+
+    img_file_ls = listdir(dir_photo+name_hash)
+
+    for img_idx in range(len(img_file_ls)):
+        img_list[img_idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+img_file_ls[img_idx])
+        img_list[img_idx].Open()
+        img_clip_list[img_idx] = video_photo_clip(vid=img_list[img_idx],layer=3
+            ,scale_x=0.81,scale_y=0.68,location_y=-0.03,position=img_dict_ls[img_idx]['start'],end=img_dict_ls[img_idx]['duration'],audio=False)
+        t.AddClip(img_clip_list[img_idx])
+        img_list[img_idx].Close()
+
+    anchor = openshot.FFmpegReader(dir_anchor+name_hash+"/0.mp4")
+    anchor.Open()
+    anchor_clip = video_photo_clip(vid=anchor,layer=4,scale_x=0.65,scale_y=0.65,
+        location_x=0.35,location_y=0.25,position=head_duration, end=anchor.info.duration,ck=ck_anchor,audio=False)
+    t.AddClip(anchor_clip)
+
+    w = video_writer_init(tmp_video_dir+name_hash+".mp4")
+    w.Open()
+    frames = int(t.info.fps)*int(main_timer)
+    for n in range(frames):
+        f=t.GetFrame(n)
+        w.WriteFrame(f)
+    t.Close()
+    w.Close()
+
+    os.remove(tmp_video_dir+name_hash+"raw.mp4")
+    os.remove(tmp_video_dir+name_hash+"script.txt")
+    print(name+"ALL DONE : www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
+
+
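+# anchor_video_v2: standard pipeline. For each text block, composite the rendered
+# anchor clip, the matching photo/video, the speech track and subtitle images
+# (timed by syllable count) over the looping background, then book-end the result
+# with the opening and closing logos.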
+def anchor_video_v2(name_hash,name,text_content, image_urls,multiLang,avatar,freeTrial):
+    print(name)
+    print(text_content)
+    print(os.getcwd())
+    print('sub image made')
+    print(multiLang)
+    file_prepare(name, name_hash, text_content,image_urls,multiLang)
+    sub_list=generate_subtitle_image(name_hash,text_content)
+
+    for fname in range(len(text_content)):
+        call_anchor(name_hash+"/"+str(fname),avatar)
+        print('step finish')
+    print('called............................................')
+
+    ck=cKey(0,254,0,270)
+    ck_anchor=cKey(0,255,1,320)
+    duration = 0
+    #average layer level is 3
+    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
+    t.Open()
+
+    main_timer = 0
+
+    LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
+    LOGO_OP.Open() # Open the reader
+    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP,layer=4,position=0,end=LOGO_OP.info.duration
+        ,location_y=-0.03,scale_x=0.8,scale_y=0.704)
+    t.AddClip(LOGO_OP_clip)
+    bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
+    bg_head.Open()
+    bg_head_clip = video_photo_clip(vid=bg_head,layer=2,position=0,end=LOGO_OP.info.duration,ck=ck)
+    t.AddClip(bg_head_clip)
+    main_timer += LOGO_OP.info.duration
+    head_duration = LOGO_OP.info.duration
+    bg_head.Close()
+    LOGO_OP.Close()
+
+    clip_duration=0
+    photo_clip_list = [None]*len(text_content)
+    img_list = [None]*len(text_content)
+    anchor_clip_list = [None] * len(text_content)
+    anchor_list = [None] * len(text_content)
+    audio_clip_list = [None] * len(text_content)
+    audio_list = [None] * len(text_content)
+    sub_clip_list = [None] * len(text_content)
+    sub_img_list = [None] * len(text_content)
+
+    idx = 0
+    for p in listdir(dir_photo+name_hash):
+
+        anchor_list[idx] = openshot.FFmpegReader(dir_anchor+name_hash+"/"+str(idx)+".mp4")
+        clip_duration = anchor_list[idx].info.duration
+        anchor_list[idx].Open()
+        anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx],layer=4,scale_x=0.65,scale_y=0.65,
+            location_x=0.35,location_y=0.25,position=main_timer, end=clip_duration,ck=ck_anchor,audio=False)
+        print('avatar is ', avatar)
+        t.AddClip(anchor_clip_list[idx])
+
+        img_list[idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+p)
+        img_list[idx].Open()
+        photo_clip_list[idx] = video_photo_clip(vid=img_list[idx],layer=3
+            ,scale_x=0.8,scale_y=0.6825,location_y=-0.03,position=main_timer,end=clip_duration,audio=False)
+        t.AddClip(photo_clip_list[idx])
+        img_list[idx].Close()
+
+        audio_list[idx] = openshot.FFmpegReader(dir_sound+name_hash+"/"+str(idx)+".mp3")
+        audio_list[idx].Open()
+        audio_clip_list[idx] = openshot.Clip(audio_list[idx])
+        audio_clip_list[idx].Position(main_timer)
+        audio_clip_list[idx].End(clip_duration)
+        t.AddClip(audio_clip_list[idx])
+
+        img_list[idx].Close()
+        anchor_list[idx].Close()
+        audio_list[idx].Close()
+
+        sub_img_list[idx] = [None] * len(sub_list[idx])
+        sub_clip_list[idx] = [None] * len(sub_list[idx])
+        sub_timer = 0
+        for sub_idx in range(len(sub_list[idx])):
+            sub_img_list[idx][sub_idx] = openshot.QtImageReader(sub_list[idx][sub_idx]['path'])
+            sub_img_list[idx][sub_idx].Open()
+            sub_duration = 0.205*sub_list[idx][sub_idx]['count']
+            sub_clip_list[idx][sub_idx] = video_photo_clip(vid=sub_img_list[idx][sub_idx], layer=6,location_x=0.069, location_y=0.89,position=main_timer+sub_timer,end=sub_duration)
+            t.AddClip(sub_clip_list[idx][sub_idx])
+            sub_img_list[idx][sub_idx].Close()
+            sub_timer += sub_duration
+            print(sub_list[idx][sub_idx]['path'])
+        main_timer += clip_duration
+        idx+=1
+
+    LOGO_ED = openshot.FFmpegReader(dir_video+"LOGO_ED.avi")
+    LOGO_ED.Open()
+    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED,layer=4,position=main_timer,end=LOGO_ED.info.duration+2
+        ,location_x=0.005,location_y=-0.031
+        ,scale_x=0.8,scale_y=0.6825)
+    t.AddClip(LOGO_ED_clip)
+    ED_duration = LOGO_ED.info.duration
+    LOGO_ED.Close()
+
+    bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
+    bg.Open()
+    bg_times = math.floor((main_timer+ED_duration)/bg.info.duration)
+    left_time = (main_timer+ED_duration) % bg.info.duration
+    bg_clip_list = [None] * bg_times
+    bg_list = [None] * bg_times
+    bg.Close()
+    bg_timer = head_duration
+    for idx in range(bg_times):
+        bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
+        bg_list[idx].Open()
+        bg_clip_list[idx] = video_photo_clip(bg_list[idx],layer=2,position=bg_timer
+            ,end=bg_list[idx].info.duration,ck=ck)
+        t.AddClip(bg_clip_list[idx])
+        bg_timer += bg_list[idx].info.duration
+        bg_list[idx].Close()
+    bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
+    bg_left.Open()
+    bg_left_clip = video_photo_clip(bg_left,layer=2,position=bg_timer,end=left_time,ck=ck)
+    t.AddClip(bg_left_clip)
+    bg_left.Close()
+
+    title = openshot.QtImageReader(dir_title+name_hash+".png")
+    title.Open() # Open the reader
+    title_clip = video_photo_clip(vid=title, layer=4,location_x=-0.047, location_y=0.801,position=0,end=head_duration+main_timer)
+    t.AddClip(title_clip)
+
+    if freeTrial==1:
+        print("THIS IS TRIAL")
+        wm = openshot.QtImageReader(dir_video+"freeTrialWatermark.png")
+        wm.Open()
+        wm_clip = video_photo_clip(wm,layer=6,position=0,end=int(head_duration+main_timer+ED_duration))
+        #t.AddClip(wm_clip)
+    else:
+        print("THIS IS NOT TRIAL")
+        print(freeTrial)
+
+    ####start building
+    w = openshot.FFmpegWriter(tmp_video_dir+name_hash+".mp4")
+    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
+    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
+        openshot.Fraction(1, 1), False, False, 3000000)
+    w.Open()
+
+    #may change duration into t.info.duration
+    frames = int(t.info.fps)*int(head_duration+main_timer+ED_duration)
+    for n in range(frames):
+        f=t.GetFrame(n)
+        w.WriteFrame(f)
+
+    #notify_group(name+"的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
+    t.Close()
+    w.Close()
+    print("video at : www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
+
+
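+# anchor_video_eng: English variant of anchor_video_v2; uses one pre-made subtitle
+# image per text block (from sub_titles) and the English closing logo (ED_ENG.mp4).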
+def anchor_video_eng(name_hash,name,text_content, image_urls,sub_titles,avatar,freeTrial):
+    file_prepare(name, name_hash, text_content,image_urls,1,'eng')
+    sub_list=generate_subtitle_image_ENG(name_hash,sub_titles)
+
+    for fname in range(len(text_content)):
+        call_anchor(name_hash+"/"+str(fname),avatar)
+        print('step finish')
+    print('called............................................')
+
+    ck=cKey(0,254,0,270)
+    ck_anchor=cKey(0,255,1,320)
+    duration = 0
+    #average layer level is 3
+    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
+    t.Open()
+
+    main_timer = 0
+    #add logo
+    LOGO_OP = openshot.FFmpegReader(dir_video+"LOGO_OP_4.mp4")
+    LOGO_OP.Open() # Open the reader
+    LOGO_OP_clip = video_photo_clip(vid=LOGO_OP,layer=4,position=0,end=LOGO_OP.info.duration
+        ,location_y=-0.03,scale_x=0.8,scale_y=0.704)
+    t.AddClip(LOGO_OP_clip)
+    #add background video (head is different)
+    bg_head = openshot.FFmpegReader(dir_video+"complete_head_aispokesgirl.mp4")
+    bg_head.Open()
+    bg_head_clip = video_photo_clip(vid=bg_head,layer=2,position=0,end=LOGO_OP.info.duration,ck=ck)
+    t.AddClip(bg_head_clip)
+
+    main_timer += LOGO_OP.info.duration
+    head_duration = LOGO_OP.info.duration
+    bg_head.Close()
+    LOGO_OP.Close()
+
+    #prepare empty list
+    clip_duration=0
+    photo_clip_list = [None]*len(text_content)
+    img_list = [None]*len(text_content)
+    anchor_clip_list = [None] * len(text_content)
+    anchor_list = [None] * len(text_content)
+    audio_clip_list = [None] * len(text_content)
+    audio_list = [None] * len(text_content)
+    sub_clip_list = [None] * len(text_content)
+    #openshot image holder
+    sub_img_list = [None] * len(text_content)
+
+    idx = 0
+    for p in listdir(dir_photo+name_hash):
+
+        anchor_list[idx] = openshot.FFmpegReader(dir_anchor+name_hash+"/"+str(idx)+".mp4")
+        clip_duration = anchor_list[idx].info.duration
+        anchor_list[idx].Open()
+        anchor_clip_list[idx] = video_photo_clip(vid=anchor_list[idx],layer=4,scale_x=0.65,scale_y=0.65,
+            location_x=0.35,location_y=0.25,position=main_timer, end=clip_duration,ck=ck_anchor,audio=False)
+        t.AddClip(anchor_clip_list[idx])
+        #insert image
+        img_list[idx] = openshot.FFmpegReader(dir_photo+name_hash+'/'+p)
+        img_list[idx].Open()
+        photo_clip_list[idx] = video_photo_clip(vid=img_list[idx],layer=3
+            ,scale_x=0.81,scale_y=0.68,location_y=-0.03,position=main_timer,end=clip_duration,audio=False)
+        t.AddClip(photo_clip_list[idx])
+        img_list[idx].Close()
+        #insert audio (speech)
+        audio_list[idx] = openshot.FFmpegReader(dir_sound+name_hash+"/"+str(idx)+".mp3")
+        audio_list[idx].Open()
+        audio_clip_list[idx] = openshot.Clip(audio_list[idx])
+        audio_clip_list[idx].Position(main_timer)
+        audio_clip_list[idx].End(clip_duration)
+        t.AddClip(audio_clip_list[idx])
+        #insert subtitle
+        sub_img_list[idx] = openshot.QtImageReader(sub_list[idx])
+        sub_img_list[idx].Open()
+        sub_clip_list[idx] = video_photo_clip(vid=sub_img_list[idx], layer=6,location_x=0.069, location_y=0.89,position=main_timer,end=clip_duration)
+        t.AddClip(sub_clip_list[idx])
+
+        img_list[idx].Close()
+        anchor_list[idx].Close()
+        audio_list[idx].Close()
+        sub_img_list[idx].Close()
+
+        main_timer += clip_duration
+        idx+=1
+
+    LOGO_ED = openshot.FFmpegReader(dir_video+"ED_ENG.mp4")
+    LOGO_ED.Open()
+    LOGO_ED_clip = video_photo_clip(vid=LOGO_ED,layer=4,position=main_timer,end=LOGO_ED.info.duration+2
+        ,location_x=0.005,location_y=-0.031
+        ,scale_x=0.8,scale_y=0.6825)
+    t.AddClip(LOGO_ED_clip)
+    ED_duration = LOGO_ED.info.duration
+    LOGO_ED.Close()
+
+    bg = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
+    bg.Open()
+    bg_times = math.floor((main_timer+ED_duration)/bg.info.duration)
+    left_time = (main_timer+ED_duration) % bg.info.duration
+    bg_clip_list = [None] * bg_times
+    bg_list = [None] * bg_times
+    bg.Close()
+    bg_timer = head_duration
+    for idx in range(bg_times):
+        bg_list[idx] = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
+        bg_list[idx].Open()
+        bg_clip_list[idx] = video_photo_clip(bg_list[idx],layer=2,position=bg_timer
+            ,end=bg_list[idx].info.duration,ck=ck)
+        t.AddClip(bg_clip_list[idx])
+        bg_timer += bg_list[idx].info.duration
+        bg_list[idx].Close()
+    bg_left = openshot.FFmpegReader(dir_video+"complete_double_aispokesgirl.mp4")
+    bg_left.Open()
+    bg_left_clip = video_photo_clip(bg_left,layer=2,position=bg_timer,end=left_time,ck=ck)
+    t.AddClip(bg_left_clip)
+    bg_left.Close()
+
+    title = openshot.QtImageReader(dir_title+name_hash+".png")
+    title.Open() # Open the reader
+    title_clip = video_photo_clip(vid=title, layer=4,location_x=-0.047, location_y=0.801,position=0,end=head_duration+main_timer)
+    t.AddClip(title_clip)
+
+    if freeTrial==1:
+        wm = openshot.QtImageReader(dir_video+"freeTrialWatermark.png")
+        wm.Open()
+        wm_clip = video_photo_clip(wm,layer=6,position=0,end=int(head_duration+main_timer+ED_duration))
+        #t.AddClip(wm_clip)
+        print("THIS IS TRIAL")
+    else:
+        print("THIS IS NOT TRIAL")
+        print(freeTrial)
+
+    ####start building
+    w = openshot.FFmpegWriter(tmp_video_dir+name_hash+".mp4")
+    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
+    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
+        openshot.Fraction(1, 1), False, False, 3000000)
+    w.Open()
+
+    #may change duration into t.info.duration
+    frames = int(t.info.fps)*int(head_duration+main_timer+ED_duration)
+    for n in range(frames):
+        f=t.GetFrame(n)
+        w.WriteFrame(f)
+
+    #notify_group(name+"(ENG)的影片已經產生完成囉! www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
+    t.Close()
+    w.Close()
+    print("video at : www.choozmo.com:8168/"+video_sub_folder+name_hash+".mp4")
+
+    #line notifs
+
+def make_speech(text):
+    # Offline Mandarin TTS via pyttsx3; writes the result to a fixed path used by the caller.
+    engine = pyttsx3.init()
+    #voices = engine.getProperty('voices')
+    engine.setProperty('voice', 'Mandarin')
+    engine.save_to_file(text, '/app/speech.mp3')
+    engine.runAndWait()
+
+
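+# RPyC service entry points: remote clients reach the exposed_ methods as
+# conn.root.call_video(...), conn.root.call_video_eng(...),
+# conn.root.call_video_gen(...) and conn.root.make_speech(...).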
+class video_service(rpyc.Service):
+    def exposed_call_video(self,name_hash,name,text_content, image_urls,multiLang,avatar,freeTrial):
+        print('ML:'+str(multiLang))
+        anchor_video_v2(name_hash,name,text_content, image_urls,multiLang,avatar,freeTrial)
+    def exposed_call_video_eng(self,name_hash,name,text_content, image_urls,sub_titles,avatar,freeTrial):
+        anchor_video_eng(name_hash,name,text_content, image_urls,sub_titles,avatar,freeTrial)
+    def exposed_call_video_gen(self,name_hash,name,text_content, image_urls,multiLang,avatar):
+        print('ML:'+str(multiLang))  #this is the long-video version
+        video_gen(name_hash,name,text_content, image_urls,multiLang,avatar)
+    def exposed_make_speech(self,text):
+        make_speech(text)
+
+
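+# Minimal client-side sketch (assumes the service is reachable at <server-ip>, a placeholder):
+#   conn = rpyc.connect("<server-ip>", 8858)
+#   conn.root.call_video(name_hash, name, text_content, image_urls, multiLang, avatar, freeTrial)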
+from rpyc.utils.server import ThreadedServer
+t = ThreadedServer(video_service, port=8858)
+print('service started')
+t.start()