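# Short-video generation pipeline: builds an OpenShot timeline from CSV-listed
# assets (clips, images, narration audio), optionally composites an AI
# spokesperson video fetched from a remote rendering host, and finally burns
# auto-generated subtitles onto the rendered output.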
import openshot
import re
from PIL import Image, ImageDraw, ImageFont
import pandas as pd
import os
import cv2
import numpy as np
import moviepy.editor as mp
import time
import pysrt
import shutil
import rpyc
import random
import string
import requests
from bs4 import BeautifulSoup
 
def cKey(r, g, b, fuzz):
    """Build a ChromaKey effect for the given RGB key colour and fuzz factor."""
    col = openshot.Color()
    col.red = openshot.Keyframe(r)
    col.green = openshot.Keyframe(g)
    col.blue = openshot.Keyframe(b)
    return openshot.ChromaKey(col, openshot.Keyframe(fuzz))
 
def video_writer_init(path):
    """Create an FFmpeg writer configured for 1280x720, 30 fps, H.264 video and AAC stereo audio."""
    w = openshot.FFmpegWriter(path)
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    return w
 
def video_photo_clip(video=None, layer=None, position=None, end=None,
                     scale_x=1, scale_y=1, location_x=0, location_y=0,
                     ck=None, audio=True):
    """Wrap a reader in an openshot.Clip with layer, timing, scale, position and optional chroma key set."""
    clip = openshot.Clip(video)
    clip.Layer(layer)
    clip.Position(position)
    clip.End(end)
    clip.scale_x = openshot.Keyframe(scale_x)
    clip.scale_y = openshot.Keyframe(scale_y)
    clip.location_x = openshot.Keyframe(location_x)
    clip.location_y = openshot.Keyframe(location_y)

    if ck is not None:
        clip.AddEffect(ck)
    if audio:
        clip.has_audio = openshot.Keyframe(1)
    else:
        clip.has_audio = openshot.Keyframe(0)
    return clip
 
def trim_punctuation(s):
    """Replace any run of characters that is neither CJK nor alphanumeric with a single space."""
    pat_block = u'[^\u4e00-\u9fff0-9a-zA-Z]+'
    pattern = u'([0-9]+{0}[0-9]+)|{0}'.format(pat_block)
    res = re.sub(pattern, lambda x: x.group(1) if x.group(1) else u" ", s)
    return res
 
def randomString(stringLength=10):
    """Return a random lowercase string used for temporary file names."""
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(stringLength))
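# The function below uploads a rendered narration file to a remote host over
# rpyc and runs a conversion script there; judging from the names involved
# ("p9.sh", "ai_spokesgirl"), the remote service appears to render an AI
# spokesperson video from the narration and serve it back over HTTP.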
 
def mp3_to_anchor(fname):
    """Upload fname to the remote renderer via rpyc and return the URL of the converted video."""
    conn = rpyc.classic.connect("192.168.192.221", 18812)
    randname = randomString(10)
    finalname = randomString(10)
    ropen = conn.builtins.open
    # copy the local file to the remote host
    with open(fname, 'rb') as fr:
        fw = ropen('/tmp/' + randname + '.mp4', 'wb')
        fw.write(fr.read())
        fw.close()
    # run the remote conversion script and return the URL it will publish
    ros = conn.modules.os
    ros.system('/root/to_video/p9.sh ' + randname + ".mp4 " + finalname + ".mp4")
    return 'http://192.168.192.221/video/' + finalname + '.mp4'
 
def download_mp4(url, name):
    """Download url into input/bg/ai_spokesgirl_<name>.mp4; return False if it is not available yet."""
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'}
    r = requests.get(url, headers=headers, stream=True)
    if r.status_code == 404:
        return False
    with open('input/bg/ai_spokesgirl_%s.mp4' % (name), 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    return True
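# Main assembly step: reads 導盲犬協會.csv and, for each row, places the narration
# audio (音檔) and the listed asset (素材, mp4 or jpg) on the timeline; when the
# 是否要場景 ("needs scene") column is 是, it also composites a chroma-keyed AI
# spokesperson clip plus a studio background, then renders everything to
# output/test7.mp4 between an opening and a closing logo.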
 
def text_to_short_vedio_create(title_ad="input/people/peggy1_1_long.mp4", op="input/bg/LOGO_OP_4.mp4", ed="input/bg/LOGO_ED.mp4", bg="input/bg/串場樣板1_long.mp4"):
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()

    # chroma-key (background removal) parameters
    # ck = cKey(0, 254, 0, 270)
    ck = cKey(0, 255, 0, 320)
    ck_anchor = None
    # running position (seconds) on the timeline
    time_ = 0

    csv_use = pd.read_csv("導盲犬協會.csv")
    csv_use = csv_use.dropna(how='all')
    csv_use.reset_index(inplace=True)

    # opening logo clip
    anchor_op = openshot.FFmpegReader(op)
    anchor_op.Open()
    anchor_clip_op = video_photo_clip(video=anchor_op, layer=2, scale_x=1, scale_y=1,
            location_x=0, location_y=0, position=time_, end=anchor_op.info.duration, ck=ck_anchor, audio=True)
    t.AddClip(anchor_clip_op)
    anchor_op.Close()
    time_ += anchor_op.info.duration
 
    for i in range(len(csv_use)):
    # for i in range(3):
        pwd_ = str(csv_use.loc[i, ['音檔']].values[0])

        # narration audio for this row (kept in locals() so the reader and clip
        # references stay alive until the timeline is rendered)
        locals()['anchor_music' + str(i)] = openshot.FFmpegReader("導盲犬協會影片素材2/" + pwd_)
        locals()['anchor_music' + str(i)].Open()
        locals()['anchor_music' + str(i) + 'clip'] = video_photo_clip(video=locals()['anchor_music' + str(i)], layer=3, scale_x=0, scale_y=0,
                   location_x=0, location_y=0, position=time_, end=locals()['anchor_music' + str(i)].info.duration, ck=ck_anchor, audio=True)
        t.AddClip(locals()['anchor_music' + str(i) + 'clip'])
        locals()['anchor_music' + str(i)].Close()

        # if this row requests the spokesperson scene, render the narration to a
        # temporary video, send it to the remote renderer and wait for the result
        if str(csv_use.loc[i, ['是否要場景']].values[0]) == "是":
            scale_x_use = 0.59
            scale_y_use = 0.59
            t1 = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
            t1.Open()
            pwd_ = str(csv_use.loc[i, ['音檔']].values[0])

            locals()['anchor_music_t' + str(i)] = openshot.FFmpegReader("導盲犬協會影片素材2/" + pwd_)
            locals()['anchor_music_t' + str(i)].Open()
            locals()['anchor_music_t' + str(i) + 'clip'] = video_photo_clip(video=locals()['anchor_music_t' + str(i)], layer=3, scale_x=0, scale_y=0,
                   location_x=0, location_y=0, position=0, end=locals()['anchor_music_t' + str(i)].info.duration, ck=ck_anchor, audio=True)
            t1.AddClip(locals()['anchor_music_t' + str(i) + 'clip'])
            locals()['anchor_music_t' + str(i)].Close()
            w = video_writer_init("input/people/%s.mp4" % ('anchor_music' + str(i)))
            w.Open()
            frames = int(t1.info.fps) * int(locals()['anchor_music_t' + str(i)].info.duration)
            for n in range(frames):
                f = t1.GetFrame(n)
                w.WriteFrame(f)
            w.Close()
            t1.Close()
            fname = mp3_to_anchor("input/people/%s.mp4" % ('anchor_music' + str(i)))
            # print(fname)
            time.sleep(180)
            # poll until the remote renderer has published the converted clip
            while True:
                result = download_mp4(fname, str(i))
                if result:
                    break
                print('waiting...')
                time.sleep(60)
        else:
            scale_x_use = 1
            scale_y_use = 1
 
        choose = str(csv_use.loc[i, ['素材']].values[0]).split(".")[-1]
        pwd_p1 = str(csv_use.loc[i, ['素材']].values[0])
        if choose == 'mp4':
            # video asset; scaled down and offset when the spokesperson scene is shown
            locals()['anchor' + str(i)] = openshot.FFmpegReader("導盲犬協會影片素材2/" + pwd_p1)
            locals()['anchor' + str(i)].Open()
            locals()['anchor' + str(i) + 'clip'] = video_photo_clip(video=locals()['anchor' + str(i)], layer=4, scale_x=scale_x_use, scale_y=scale_y_use,
               location_x=-0.04, location_y=-0.04, position=time_, end=locals()['anchor_music' + str(i)].info.duration, ck=ck_anchor, audio=False)
            t.AddClip(locals()['anchor' + str(i) + 'clip'])
            locals()['anchor' + str(i)].Close()
            if str(csv_use.loc[i, ['是否要場景']].values[0]) == "是":
                # chroma-keyed spokesperson overlay plus studio background
                locals()['anchor_ad' + str(i)] = openshot.FFmpegReader('input/bg/ai_spokesgirl_%s.mp4' % (str(i)))
                locals()['anchor_ad' + str(i)].Open()
                locals()['anchor_clip_ad' + str(i)] = video_photo_clip(video=locals()['anchor_ad' + str(i)], layer=6, scale_x=0.8, scale_y=0.8,
                   location_x=0.38, location_y=0.35, position=time_, end=locals()['anchor_music' + str(i)].info.duration, ck=ck, audio=False)
                t.AddClip(locals()['anchor_clip_ad' + str(i)])
                locals()['anchor_ad' + str(i)].Close()
                locals()['anchor_bg' + str(i)] = openshot.FFmpegReader(bg)
                locals()['anchor_bg' + str(i)].Open()
                locals()['anchor_clip_bg' + str(i)] = video_photo_clip(video=locals()['anchor_bg' + str(i)], layer=2, scale_x=1, scale_y=1,
                   location_x=0, location_y=0, position=time_, end=locals()['anchor_music' + str(i)].info.duration, ck=ck_anchor, audio=False)
                t.AddClip(locals()['anchor_clip_bg' + str(i)])
                locals()['anchor_bg' + str(i)].Close()
 
        elif choose == 'jpg':
            # still-image asset, read with QtImageReader but placed the same way
            locals()['anchor' + str(i)] = openshot.QtImageReader("導盲犬協會影片素材2/" + pwd_p1)
            locals()['anchor' + str(i)].Open()
            locals()['anchor' + str(i) + 'clip'] = video_photo_clip(video=locals()['anchor' + str(i)], layer=4, scale_x=scale_x_use, scale_y=scale_y_use,
               location_x=-0.04, location_y=-0.04, position=time_, end=locals()['anchor_music' + str(i)].info.duration, ck=ck_anchor, audio=False)
            t.AddClip(locals()['anchor' + str(i) + 'clip'])
            locals()['anchor' + str(i)].Close()
            if str(csv_use.loc[i, ['是否要場景']].values[0]) == "是":
                locals()['anchor_ad' + str(i)] = openshot.FFmpegReader("input/bg/ai_spokesgirl_%s.mp4" % (str(i)))
                locals()['anchor_ad' + str(i)].Open()
                locals()['anchor_clip_ad' + str(i)] = video_photo_clip(video=locals()['anchor_ad' + str(i)], layer=6, scale_x=0.8, scale_y=0.8,
                   location_x=0.38, location_y=0.35, position=time_, end=locals()['anchor_music' + str(i)].info.duration, ck=ck, audio=False)
                t.AddClip(locals()['anchor_clip_ad' + str(i)])
                locals()['anchor_ad' + str(i)].Close()
                locals()['anchor_bg' + str(i)] = openshot.FFmpegReader(bg)
                locals()['anchor_bg' + str(i)].Open()
                locals()['anchor_clip_bg' + str(i)] = video_photo_clip(video=locals()['anchor_bg' + str(i)], layer=2, scale_x=1, scale_y=1,
                   location_x=0, location_y=0, position=time_, end=locals()['anchor_music' + str(i)].info.duration, ck=ck_anchor, audio=False)
                t.AddClip(locals()['anchor_clip_bg' + str(i)])
                locals()['anchor_bg' + str(i)].Close()

        # advance the timeline by the length of this row's narration
        time_ += locals()['anchor_music' + str(i)].info.duration
 
    # closing logo clip
    anchor_ed = openshot.FFmpegReader(ed)
    anchor_ed.Open()
    anchor_clip_ed = video_photo_clip(video=anchor_ed, layer=2, scale_x=1, scale_y=1,
            location_x=0, location_y=0, position=time_, end=anchor_ed.info.duration, ck=ck_anchor, audio=True)
    time_ += anchor_ed.info.duration
    t.AddClip(anchor_clip_ed)
    anchor_ed.Close()

    # render the assembled timeline
    w = video_writer_init("output/test7.mp4")
    w.Open()
    frames = int(t.info.fps) * int(time_)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
 
# render subtitle text onto a green (chroma-key) canvas and save it as a PNG
def txt2image(content, save_target, lang='zh', size=26, fon="font/DFT_B7.ttc"):
    unicode_text = trim_punctuation(content)
    font = ''
    if lang == 'zh':
        font = ImageFont.truetype(font=fon, size=size)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=size)

    W, H = (1280, 500)
    canvas = Image.new('RGB', (W, H), "#00FF00")
    draw = ImageDraw.Draw(canvas)

    text = content
    if "\n" in text:
        # two-line subtitle: centre each line horizontally and stack them vertically
        w, h = draw.textsize(text.split("\n")[0], font=font)
        text_border(draw, (W - w) / 2, 0, text.split("\n")[0], font, 'black', 'white')
        w, h = draw.textsize(text.split("\n")[1], font=font)
        text_border(draw, (W - w) / 2, h + 2, text.split("\n")[1], font, 'black', 'white')
    else:
        w, h = draw.textsize(content, font=font)
        text_border(draw, (W - w) / 2, 0, text, font, 'black', 'white')
    canvas.save(save_target, "PNG")
 
def text_border(draw, x, y, text, font, shadowcolor, fillcolor):
    """Draw outlined text: offset copies in shadowcolor first, then the fill colour on top."""
    draw.text((x - 1, y), text, font=font, fill=shadowcolor)
    draw.text((x + 1, y), text, font=font, fill=shadowcolor)
    draw.text((x, y - 1), text, font=font, fill=shadowcolor)
    draw.text((x, y + 1), text, font=font, fill=shadowcolor)
    draw.text((x - 1, y + 1), text, font=font, fill=shadowcolor)
    draw.text((x + 1, y - 1), text, font=font, fill=shadowcolor)
    draw.text((x - 1, y - 1), text, font=font, fill=shadowcolor)
    draw.text((x + 1, y + 1), text, font=font, fill=shadowcolor)
    # thicker border
    draw.text((x - 2, y - 2), text, font=font, fill=shadowcolor)
    draw.text((x + 2, y - 2), text, font=font, fill=shadowcolor)
    draw.text((x - 2, y + 2), text, font=font, fill=shadowcolor)
    draw.text((x + 2, y + 2), text, font=font, fill=shadowcolor)
    # now draw the text over it
    draw.text((x, y), text, font=font, fill=fillcolor)
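# Subtitle pass: takes the rendered video, runs autosub to transcribe its audio
# (unless an .srt file is supplied directly), renders each subtitle line to a
# green PNG with txt2image, and overlays it on the video with chroma keying.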
 
def text_to_short_vedio(mp4_file="input/example/test3.mp4", sound_file=None,
                        output_filename="output/demo.mp4", text_font="font/DFT_B7.ttc"):
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()

    # chroma-key parameters
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 0, 320)

    anchor = openshot.FFmpegReader(mp4_file)
    anchor.Open()
    anchor_clip = video_photo_clip(video=anchor, layer=2, scale_x=1, scale_y=1,
            location_x=0, location_y=0, position=0, end=anchor.info.duration, audio=True)
    t.AddClip(anchor_clip)
    anchor.Close()

    number = 0
    sound_srt_file = ""
    # generate an srt transcript from the audio automatically unless one was supplied
    if sound_file is not None:
        if ".srt" in sound_file:
            sound_srt_file = sound_file
        else:
            cmd = "autosub -S zh-TW -D zh-TW " + sound_file
            os.system(cmd)
            sound_srt_file = sound_file.split('.')[0] + ".srt"

    # open the srt file and overlay each subtitle as a chroma-keyed PNG clip
    try:
        subs = pysrt.open(sound_srt_file)
        text_form = []
        for context in subs:
            end = context.end - context.start
            end_timeStamp = (end.minutes * 60 + end.seconds + 0.001 * end.milliseconds)
            start_timeStamp = (context.start.minutes * 60 + context.start.seconds + 0.001 * context.start.milliseconds)
            text_form.append({'text': context.text, 'start': start_timeStamp, 'end': end_timeStamp, 'size': 36, 'font': text_font})
        for text_tmp in text_form:
            file_name = "tmp/save_target_" + str(number) + ".png"
            txt2image(text_tmp['text'], file_name, lang='zh', size=text_tmp['size'], fon=text_tmp['font'])
            exec('text_anchor_{} = openshot.QtImageReader("tmp/save_target_{}.png")'.format(number, number))
            exec('text_anchor_{}.Open()'.format(number))
            exec('text_anchor_clip_{} = video_photo_clip(video=text_anchor_{},layer=4,scale_x=1,scale_y=1,\
                    location_x=0,location_y=0.67,position=text_tmp["start"], end=text_tmp["end"],ck=ck_anchor,audio=True)'.format(number, number))
            exec('t.AddClip(text_anchor_clip_{})'.format(number))
            exec('text_anchor_{}.Close()'.format(number))
            number = number + 1
    except Exception:
        print("unable to open the srt file (subtitle generation failed)")

    w = video_writer_init(output_filename)
    w.Open()
    frames = int(t.info.fps) * int(anchor.info.duration)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
    # clear the temporary subtitle images
    shutil.rmtree('tmp')
    os.mkdir('tmp')
 
if __name__ == '__main__':
    # picture_change()
    text_to_short_vedio_create()
    text_to_short_vedio(mp4_file="output/test7.mp4", sound_file='output/test7.mp4', text_font="font/DFT_R7.ttc")
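# Note: running this end-to-end presumably requires the 導盲犬協會.csv file and the
# 導盲犬協會影片素材2/ material directory, the fonts under font/, existing input/,
# tmp/ and output/ directories, the autosub CLI on PATH, and network access to
# the rpyc rendering host at 192.168.192.221; none of these are created by the
# script itself.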
 
 