- # coding:utf-8
- import openshot
- import re
- from PIL import Image,ImageDraw,ImageFont
def cKey(r, g, b, fuzz):
    """Build an openshot ChromaKey effect keyed on the given RGB colour.

    r, g, b: channel values of the colour to key out.
    fuzz: tolerance passed to the ChromaKey effect.
    """
    key_colour = openshot.Color()
    key_colour.red = openshot.Keyframe(r)
    key_colour.green = openshot.Keyframe(g)
    key_colour.blue = openshot.Keyframe(b)
    return openshot.ChromaKey(key_colour, openshot.Keyframe(fuzz))
def video_writer_init(path):
    """Create an FFmpegWriter preset for 1280x720 H.264/AAC output at ~30 fps."""
    writer = openshot.FFmpegWriter(path)
    # Audio: AAC, 44.1 kHz, stereo, 3 Mbps.
    writer.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    # Video: H.264, 30000/1000 fps, 1280x720, square pixels, 3 Mbps.
    writer.SetVideoOptions(
        True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
        openshot.Fraction(1, 1), False, False, 3000000,
    )
    return writer
def video_photo_clip(video=None, layer=None, position=None, end=None,
                     scale_x=1, scale_y=1, location_x=0, location_y=0,
                     ck=None, audio=True):
    """Wrap an openshot Reader in a positioned, scaled timeline Clip.

    video: an opened openshot Reader (FFmpegReader / QtImageReader).
    layer: timeline layer (higher layers render on top).
    position: start time on the timeline, in seconds.
    end: clip end time, in seconds.
    scale_x, scale_y: scale factors applied via constant Keyframes.
    location_x, location_y: normalised offsets applied via constant Keyframes.
    ck: optional chroma-key effect to attach (e.g. from cKey()).
    audio: truthy to keep the clip's audio track, falsy to mute it.

    Returns the configured openshot.Clip.
    """
    clip = openshot.Clip(video)
    clip.Layer(layer)
    clip.Position(position)
    clip.End(end)
    clip.scale_x = openshot.Keyframe(scale_x)
    clip.scale_y = openshot.Keyframe(scale_y)
    clip.location_x = openshot.Keyframe(location_x)
    clip.location_y = openshot.Keyframe(location_y)

    # Idiom fix: identity check instead of `ck != None`.
    if ck is not None:
        clip.AddEffect(ck)
    # Was `if audio == True:`, which silently muted the clip for truthy
    # non-bool values; plain truthiness matches the parameter's intent.
    clip.has_audio = openshot.Keyframe(1 if audio else 0)
    return clip
def trim_punctuation(s):
    """Collapse runs of non-CJK, non-ASCII-alphanumeric characters to a space.

    A run that sits between digits (e.g. the '.' in '3.14') is kept intact,
    so numeric tokens survive the cleanup.
    """
    non_word = u'[^\u4e00-\u9fff0-9a-zA-Z]+'
    cleaner = re.compile(u'([0-9]+{0}[0-9]+)|{0}'.format(non_word))
    # group(1) is the digits-separator-digits form; keep it, else a space.
    return cleaner.sub(lambda m: m.group(1) or u" ", s)
def txt2image(content, save_target, lang='zh'):
    """Render trimmed text onto a transparent 700x500 PNG.

    content: raw text; punctuation is normalised via trim_punctuation().
    save_target: output PNG file path.
    lang: 'zh' selects the bundled CJK font, anything else the Latin font.
    """
    unicode_text = trim_punctuation(content)
    if lang == 'zh':
        font = ImageFont.truetype(font="font/DFT_B7.ttc", size=38)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=38)
    # NOTE(review): the original called font.getsize() and discarded the
    # result; getsize() was removed in Pillow 10, so the dead call is dropped.
    canvas = Image.new('RGBA', (700, 500), (255, 255, 255, 0))
    draw = ImageDraw.Draw(canvas)
    draw.text((5, 5), unicode_text, 'black', font)
    canvas.save(save_target, "PNG")
def text_to_short_vedio(bg, title_bg, text, title, time,
                        output="/app/output/test_op.mp4"):
    """Compose a short 1280x720 video: footage + template + caption + title.

    bg: path of the main (anchor) footage.
    title_bg: path of the template/overlay footage.
    text: caption text, rendered via txt2image near the bottom of the frame.
    title: title text, rendered via txt2image near the top of the frame.
    time: duration to render, in seconds.
    output: target mp4 path (new parameter; defaults to the original
        hard-coded location, so existing callers are unaffected).
    """
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000),
                          44100, 2, openshot.LAYOUT_STEREO)
    t.Open()

    # Chroma key for the green-screen overlays. (An unused second key,
    # cKey(0, 254, 0, 270), was removed from the original.)
    ck_anchor = cKey(0, 255, 0, 320)

    # Main footage.
    anchor = openshot.FFmpegReader(bg)
    anchor.Open()
    anchor_clip = video_photo_clip(video=anchor, layer=2, scale_x=0.59,
                                   scale_y=0.59, location_x=-0.04,
                                   location_y=-0.04, position=0, end=5,
                                   ck=ck_anchor, audio=True)
    t.AddClip(anchor_clip)
    anchor.Close()

    # Template/overlay footage.
    anchor3 = openshot.FFmpegReader(title_bg)
    anchor3.Open()
    anchor_clip3 = video_photo_clip(video=anchor3, layer=1, scale_x=1,
                                    scale_y=1, location_x=0, location_y=0,
                                    position=0, end=5, ck=ck_anchor,
                                    audio=True)
    t.AddClip(anchor_clip3)
    anchor3.Close()

    # Caption image, placed low in the frame.
    txt2image(text, "tmp/save_target.png", lang='zh')
    text_anchor = openshot.QtImageReader("tmp/save_target.png")
    text_anchor.Open()
    text_anchor_clip = video_photo_clip(video=text_anchor, layer=4,
                                        scale_x=1, scale_y=1,
                                        location_x=0.27, location_y=0.83,
                                        position=0, end=5, ck=ck_anchor,
                                        audio=True)
    t.AddClip(text_anchor_clip)
    text_anchor.Close()

    # Title image, placed high in the frame.
    txt2image(title, "tmp/save_title.png", lang='zh')
    text_title = openshot.QtImageReader("tmp/save_title.png")
    text_title.Open()
    text_title_clip = video_photo_clip(video=text_title, layer=5,
                                       scale_x=0.5, scale_y=0.5,
                                       location_x=0.5, location_y=-0.145,
                                       position=0, end=5, ck=ck_anchor,
                                       audio=True)
    t.AddClip(text_title_clip)
    text_title.Close()

    w = video_writer_init(output)
    w.Open()
    frames = t.info.fps.ToInt() * int(time)
    print('結果一', frames)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
if __name__ == '__main__':
    # Demo run: render a 14-second clip from the sample assets.
    src_video = "/app/input/movie_main/C0050.mp4"
    template = "/app/examples/主播示意1.mp4"
    text_to_short_vedio(src_video, template, "測試content", "標題測試", 14)