import openshot
import os
import re
import time
import pysrt
import shutil
from PIL import Image, ImageDraw, ImageFont


def cKey(r, g, b, fuzz):
    """Return an openshot ChromaKey effect keyed on the RGB color (r, g, b).

    fuzz is the tolerance passed to ChromaKey: higher values key out a wider
    range of colors around the target.
    """
    col = openshot.Color()
    col.red = openshot.Keyframe(r)
    col.green = openshot.Keyframe(g)
    col.blue = openshot.Keyframe(b)
    return openshot.ChromaKey(col, openshot.Keyframe(fuzz))


def video_writer_init(path):
    """Create an FFmpegWriter for *path* with fixed output settings.

    Output format: H.264 video at 1280x720 / 30 fps (30000/1000) with a
    3 Mbps bitrate, plus stereo AAC audio at 44.1 kHz.
    """
    w = openshot.FFmpegWriter(path)
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000),
                      1280, 720, openshot.Fraction(1, 1), False, False, 3000000)
    return w


def video_photo_clip(video=None, layer=None, position=None, end=None,
                     scale_x=1, scale_y=1, location_x=0, location_y=0,
                     ck=None, audio=True):
    """Wrap an openshot reader in a positioned Clip.

    Parameters
    ----------
    video : an opened openshot reader (FFmpegReader, QtImageReader, ...)
    layer : timeline layer (higher layers render on top)
    position : start time on the timeline, in seconds
    end : clip duration end, in seconds
    scale_x, scale_y : scale keyframe values
    location_x, location_y : position keyframe values (fractions of frame)
    ck : optional chroma-key effect to attach (see cKey)
    audio : whether the clip's audio track is enabled
    """
    clip = openshot.Clip(video)
    clip.Layer(layer)
    clip.Position(position)
    clip.End(end)
    clip.scale_x = openshot.Keyframe(scale_x)
    clip.scale_y = openshot.Keyframe(scale_y)
    clip.location_x = openshot.Keyframe(location_x)
    clip.location_y = openshot.Keyframe(location_y)
    if ck is not None:
        clip.AddEffect(ck)
    clip.has_audio = openshot.Keyframe(1 if audio else 0)
    return clip


def trim_punctuation(s):
    """Collapse punctuation runs in *s* to single spaces.

    CJK ideographs (U+4E00..U+9FFF), digits and ASCII letters are kept.
    A punctuation run sandwiched between two digit groups (e.g. "12:30")
    is matched by group 1 and preserved as-is; every other run becomes
    one space.
    """
    pat_block = u'[^\u4e00-\u9fff0-9a-zA-Z]+'
    pattern = u'([0-9]+{0}[0-9]+)|{0}'.format(pat_block)
    return re.sub(pattern, lambda m: m.group(1) if m.group(1) else u" ", s)


# Render subtitle text to an image (text -> PNG)
def txt2image(content, save_target, lang='zh', size=26, fon="font/DFT_B7.ttc"):
    """Render *content* centered on a 1280x500 pure-green PNG at *save_target*.

    The green background (#00FF00) is later removed with a chroma-key
    effect, so only the outlined text remains visible. Supports at most
    one embedded newline (two subtitle lines).
    """
    if lang == 'zh':
        font = ImageFont.truetype(font=fon, size=size)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=size)
    W, H = (1280, 500)
    canvas = Image.new('RGB', (W, H), "#00FF00")  # green screen background
    draw = ImageDraw.Draw(canvas)
    text = content
    if "\n" in text:
        # Two-line subtitle: center each line horizontally; the second line
        # starts just below the first line's measured height.
        first_line = text.split("\n")[0]
        second_line = text.split("\n")[1]
        # NOTE(review): ImageDraw.textsize was removed in Pillow >= 10;
        # migrate to textbbox when upgrading Pillow.
        w, h = draw.textsize(first_line, font=font)
        text_border(draw, (W - w) / 2, 0, first_line, font, 'black', 'white')
        w, h = draw.textsize(second_line, font=font)
        text_border(draw, (W - w) / 2, h + 2, second_line, font, 'black', 'white')
    else:
        w, h = draw.textsize(content, font=font)
        text_border(draw, (W - w) / 2, 0, text, font, 'black', 'white')
    canvas.save(save_target, "PNG")


def text_border(draw, x, y, text, font, shadowcolor, fillcolor):
    """Draw *text* at (x, y) in *fillcolor* with a *shadowcolor* outline.

    The outline is produced by stamping the text at 1px offsets in all
    eight directions plus 2px diagonal offsets, then drawing the fill
    text on top.
    """
    offsets = [
        (-1, 0), (1, 0), (0, -1), (0, 1),
        (-1, 1), (1, -1), (-1, -1), (1, 1),
        # thicker border
        (-2, -2), (2, -2), (-2, 2), (2, 2),
    ]
    for dx, dy in offsets:
        draw.text((x + dx, y + dy), text, font=font, fill=shadowcolor)
    # now draw the text over it
    draw.text((x, y), text, font=font, fill=fillcolor)


def text_to_short_vedio(mp4_file="input/example/test3.mp4", sound_file=None,
                        vedio_time=30, output_filename="output/demo.mp4",
                        text_font="font/DFT_B7.ttc"):
    """Burn subtitles from *sound_file* onto *mp4_file* and write the result.

    sound_file may be an .srt file (used directly) or an audio file
    (transcribed to .srt via the external `autosub` CLI); if None, the
    video is rendered without subtitles. vedio_time is the output length
    in seconds. Subtitle images are rendered into ./tmp, composited with
    a chroma key, and ./tmp is wiped afterwards.
    """
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000),
                          44100, 2, openshot.LAYOUT_STEREO)
    t.Open()

    # Chroma-key parameters: remove the pure-green background of the
    # rendered subtitle images.
    ck_anchor = cKey(0, 255, 0, 320)

    # Base video on layer 2.
    anchor = openshot.FFmpegReader(mp4_file)
    anchor.Open()
    anchor_clip = video_photo_clip(video=anchor, layer=2, scale_x=1, scale_y=1,
                                   location_x=0, location_y=0, position=0,
                                   end=vedio_time, audio=True)
    t.AddClip(anchor_clip)
    anchor.Close()

    # Resolve the subtitle (.srt) source. Guard against sound_file=None
    # before the substring test (the original crashed on the default).
    sound_srt_file = ""
    if sound_file is not None and ".srt" in sound_file:
        sound_srt_file = sound_file
    elif sound_file is not None:
        # Auto-generate an srt transcript from the audio with autosub;
        # it writes <basename>.srt next to the input file.
        cmd = "autosub -S zh-TW -D zh-TW " + sound_file
        os.system(cmd)
        # splitext (not split('.')) so paths containing dots survive.
        sound_srt_file = os.path.splitext(sound_file)[0] + ".srt"

    # txt2image writes its PNGs here; make sure the directory exists.
    os.makedirs('tmp', exist_ok=True)

    # Open the srt file; subtitle burning is best-effort, so failures
    # only print a warning and the bare video is still rendered.
    try:
        subs = pysrt.open(sound_srt_file)
        text_form = []
        for context in subs:
            # 'end' is the subtitle's display duration (clip End),
            # 'start' its absolute position on the timeline.
            duration = context.end - context.start
            end_timeStamp = (duration.minutes * 60 + duration.seconds
                             + 0.001 * duration.milliseconds)
            start_timeStamp = (context.start.minutes * 60 + context.start.seconds
                               + 0.001 * context.start.milliseconds)
            text_form.append({'text': context.text, 'start': start_timeStamp,
                              'end': end_timeStamp, 'size': 36, 'font': text_font})

        # One keyed image clip per subtitle entry on layer 4.
        # (Plain locals replace the original exec()-built variable names,
        # and the duplicated reader.Open() call is gone.)
        for number, text_tmp in enumerate(text_form):
            file_name = "tmp/save_target_" + str(number) + ".png"
            txt2image(text_tmp['text'], file_name, lang='zh',
                      size=text_tmp['size'], fon=text_tmp['font'])
            text_anchor = openshot.QtImageReader(file_name)
            text_anchor.Open()
            text_anchor_clip = video_photo_clip(
                video=text_anchor, layer=4, scale_x=1, scale_y=1,
                location_x=0, location_y=0.67,
                position=text_tmp["start"], end=text_tmp["end"],
                ck=ck_anchor, audio=True)
            t.AddClip(text_anchor_clip)
            text_anchor.Close()
    except Exception:
        print("無法開啟srt檔案(字幕產生失敗)")

    # Render the timeline frame by frame.
    w = video_writer_init(output_filename)
    w.Open()
    frames = int(t.info.fps) * int(vedio_time)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()

    # Clear temporary subtitle images.
    shutil.rmtree('tmp')
    os.mkdir('tmp')


if __name__ == '__main__':
    text_to_short_vedio(mp4_file="input/導盲犬影片.mp4",
                        sound_file='input/導盲犬影片.srt',
                        vedio_time=284,
                        text_font="font/DFT_R7.ttc")