|
@@ -55,21 +55,44 @@ def txt2image(content, save_target,lang='zh',size=26,fon="font/DFT_B7.ttc"):
|
|
|
font = ImageFont.truetype(font="font/arial.ttf", size=size)
|
|
|
|
|
|
W, H = (1280,500)
|
|
|
- canvas = Image.new('RGBA', (W, H), (255, 255, 255, 0) )
|
|
|
+ canvas = Image.new('RGB', (W, H), "#00FF00")
|
|
|
draw = ImageDraw.Draw(canvas)
|
|
|
|
|
|
- text= unicode_text
|
|
|
- if len(text)>10 :
|
|
|
- w, h = draw.textsize(text[0:18],font = font)
|
|
|
- draw.text(((W-w)/2,0), text[0:18],'black', font)
|
|
|
- w, h = draw.textsize(text[18:],font = font)
|
|
|
- draw.text(((W-w)/2,h+2), text[18:],'black', font)
|
|
|
+ text= content
|
|
|
+ if "\n" in text:
|
|
|
+ w, h = draw.textsize(text.split("\n")[0],font = font)
|
|
|
+ #draw.text(((W-w)/2,0), text[0:18],'black', font)
|
|
|
+ text_border(draw,(W-w)/2,0,text.split("\n")[0],font,'black','white')
|
|
|
+ w, h = draw.textsize(text.split("\n")[1],font = font)
|
|
|
+ #draw.text(((W-w)/2,h+2), text[18:],'black', font)
|
|
|
+ text_border(draw,(W-w)/2,h+2,text.split("\n")[1],font,'black','white')
|
|
|
else:
|
|
|
w, h = draw.textsize(content,font = font)
|
|
|
- draw.text(((W-w)/2,0), text,'black', font)
|
|
|
+ #draw.text(((W-w)/2,0), text,'black', font)
|
|
|
+ text_border(draw,(W-w)/2,0,text,font,'black','white')
|
|
|
canvas.save(save_target, "PNG")
|
|
|
|
|
|
-def text_to_short_vedio(mp4_file = "input/example/test3.mp4",sound_file = "input/example/test3.mp4",vedio_time = 30,output_filename="output/demo.mp4",text_font = "font/DFT_B7.ttc"):
|
|
|
+def text_border(draw,x,y,text,font,shadowcolor,fillcolor):
|
|
|
+ draw.text((x-1, y), text, font=font, fill=shadowcolor)
|
|
|
+ draw.text((x+1, y), text, font=font, fill=shadowcolor)
|
|
|
+ draw.text((x, y-1), text, font=font, fill=shadowcolor)
|
|
|
+ draw.text((x, y+1), text, font=font, fill=shadowcolor)
|
|
|
+
|
|
|
+ draw.text((x-1, y+1), text, font=font, fill=shadowcolor)
|
|
|
+ draw.text((x+1, y-1), text, font=font, fill=shadowcolor)
|
|
|
+ draw.text((x-1, y-1), text, font=font, fill=shadowcolor)
|
|
|
+ draw.text((x+1, y+1), text, font=font, fill=shadowcolor)
|
|
|
+ # thicker border
|
|
|
+ draw.text((x-2, y-2), text, font=font, fill=shadowcolor)
|
|
|
+ draw.text((x+2, y-2), text, font=font, fill=shadowcolor)
|
|
|
+ draw.text((x-2, y+2), text, font=font, fill=shadowcolor)
|
|
|
+ draw.text((x+2, y+2), text, font=font, fill=shadowcolor)
|
|
|
+
|
|
|
+ # now draw the text over it
|
|
|
+ draw.text((x, y), text, font=font, fill=fillcolor)
|
|
|
+
|
|
|
+def text_to_short_vedio(mp4_file = "input/example/test3.mp4",sound_file = None
|
|
|
+ ,vedio_time = 30,output_filename="output/demo.mp4",text_font = "font/DFT_B7.ttc"):
|
|
|
t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
|
|
|
t.Open()
|
|
|
|
|
@@ -80,37 +103,45 @@ def text_to_short_vedio(mp4_file = "input/example/test3.mp4",sound_file = "input
|
|
|
anchor = openshot.FFmpegReader(mp4_file)
|
|
|
anchor.Open()
|
|
|
anchor_clip = video_photo_clip(video=anchor,layer=2,scale_x=1,scale_y=1,
|
|
|
- location_x=0,location_y=0,position=0, end=vedio_time,ck=ck_anchor,audio=True)
|
|
|
+ location_x=0,location_y=0,position=0, end=vedio_time,audio=True)
|
|
|
t.AddClip(anchor_clip)
|
|
|
anchor.Close()
|
|
|
number = 0
|
|
|
|
|
|
+ sound_srt_file = ""
|
|
|
#音檔自動產生srt(逐字稿)
|
|
|
- cmd = "autosub -S zh-TW -D zh-TW " + sound_file
|
|
|
- os.system(cmd)
|
|
|
+ if sound_file is not None and ".srt" in sound_file:
|
|
|
+ sound_srt_file = sound_file
|
|
|
+ elif sound_file is not None:
|
|
|
+ cmd = "autosub -S zh-TW -D zh-TW " + sound_file
|
|
|
+ os.system(cmd)
|
|
|
+ sound_srt_file = sound_file.rsplit('.', 1)[0] + ".srt"
|
|
|
+
|
|
|
|
|
|
#開啟srt檔
|
|
|
- sound_srt_file = sound_file.split('.')[0] + ".srt"
|
|
|
- subs = pysrt.open(sound_srt_file)
|
|
|
- text_form = []
|
|
|
- for context in subs:
|
|
|
- #print(context.start.minutes*60+context.start.seconds+ 0.001*context.start.milliseconds)
|
|
|
- end = context.end-context.start
|
|
|
- end_timeStamp=(end.minutes*60+end.seconds+ 0.001*end.milliseconds)
|
|
|
- start_timeStamp=(context.start.minutes*60+context.start.seconds+ 0.001*context.start.milliseconds)
|
|
|
- text_form.append({'text':context.text,'start':start_timeStamp,'end':end_timeStamp,'size':30,'font':text_font})
|
|
|
-
|
|
|
- for text_tmp in text_form:
|
|
|
- file_name = "tmp/save_target_" + str(number) + ".png"
|
|
|
- txt2image(text_tmp['text'], file_name,lang='zh',size = text_tmp['size'],fon = text_tmp['font'])
|
|
|
- exec('text_anchor_{} = openshot.QtImageReader("tmp/save_target_{}.png")'.format(number,number))
|
|
|
- exec('text_anchor_{}.Open()'.format(number))
|
|
|
- exec('text_anchor_{}.Open()'.format(number))
|
|
|
- exec('text_anchor_clip_{} = video_photo_clip(video=text_anchor_{},layer=4,scale_x=1,scale_y=1,\
|
|
|
- location_x=0,location_y=0.7,position=text_tmp["start"], end=text_tmp["end"],ck=ck_anchor,audio=True)'.format(number,number))
|
|
|
- exec('t.AddClip(text_anchor_clip_{})'.format(number))
|
|
|
- exec('text_anchor_{}.Close()'.format(number))
|
|
|
- number = number+1
|
|
|
+ try:
|
|
|
+ subs = pysrt.open(sound_srt_file)
|
|
|
+ text_form = []
|
|
|
+ for context in subs:
|
|
|
+ #print(context.start.minutes*60+context.start.seconds+ 0.001*context.start.milliseconds)
|
|
|
+ end = context.end-context.start
|
|
|
+ end_timeStamp=(end.minutes*60+end.seconds+ 0.001*end.milliseconds)
|
|
|
+ start_timeStamp=(context.start.minutes*60+context.start.seconds+ 0.001*context.start.milliseconds)
|
|
|
+ text_form.append({'text':context.text,'start':start_timeStamp,'end':end_timeStamp,'size':36,'font':text_font})
|
|
|
+
|
|
|
+ for text_tmp in text_form:
|
|
|
+ file_name = "tmp/save_target_" + str(number) + ".png"
|
|
|
+ txt2image(text_tmp['text'], file_name,lang='zh',size = text_tmp['size'],fon = text_tmp['font'])
|
|
|
+ exec('text_anchor_{} = openshot.QtImageReader("tmp/save_target_{}.png")'.format(number,number))
|
|
|
+ exec('text_anchor_{}.Open()'.format(number))
|
|
|
+ # reader already opened above; duplicate Open() removed
|
|
|
+ exec('text_anchor_clip_{} = video_photo_clip(video=text_anchor_{},layer=4,scale_x=1,scale_y=1,\
|
|
|
+ location_x=0,location_y=0.67,position=text_tmp["start"], end=text_tmp["end"],ck=ck_anchor,audio=True)'.format(number,number))
|
|
|
+ exec('t.AddClip(text_anchor_clip_{})'.format(number))
|
|
|
+ exec('text_anchor_{}.Close()'.format(number))
|
|
|
+ number = number+1
|
|
|
+ except Exception:
|
|
|
+ print("無法開啟srt檔案(字幕產生失敗)")
|
|
|
|
|
|
w = video_writer_init(output_filename)
|
|
|
w.Open()
|
|
@@ -123,15 +154,14 @@ def text_to_short_vedio(mp4_file = "input/example/test3.mp4",sound_file = "input
|
|
|
t.Close()
|
|
|
w.Close()
|
|
|
|
|
|
-
|
|
|
-if __name__ == '__main__':
|
|
|
-
|
|
|
-
|
|
|
- #text_form = [{'text':"texttexttext",'start':0,'end':3,'size':26},{'text':"test22222222",'start':4,'end':6,'size':26}]
|
|
|
- #print(text_form)
|
|
|
- text_to_short_vedio(mp4_file = "input/example/test3.mp4",sound_file ='input/example/test3.mp4',vedio_time =284,text_font ="font/DFT_HNT7.ttc")
|
|
|
-
|
|
|
#刪除暫存檔案
|
|
|
shutil.rmtree('tmp')
|
|
|
os.mkdir('tmp')
|
|
|
|
|
|
+
|
|
|
+if __name__ == '__main__':
|
|
|
+
|
|
|
+ text_to_short_vedio(mp4_file = "input/導盲犬影片.mp4",sound_file ='input/導盲犬影片.srt',vedio_time =284,text_font ="font/DFT_R7.ttc")
|
|
|
+
|
|
|
+
|
|
|
+
|