Ver código fonte

Merge branch 'master' of http://git.choozmo.com:3000/choozmo/ai-spokesgirl

Mia 2 anos atrás
pai
commit
bc1945d4fb
3 arquivos alterados com 462 adições e 18 exclusões
  1. 28 2
      mp3_to_anchor.py
  2. 42 16
      op_and_ed.py
  3. 392 0
      openshotExample3.py

+ 28 - 2
mp3_to_anchor.py

@@ -2,6 +2,10 @@ import rpyc
 import os
 import random
 import string
+import requests
+from bs4 import BeautifulSoup
+import time
+
 
 def randomString(stringLength=10):
     letters = string.ascii_lowercase
@@ -25,6 +29,28 @@ def mp3_to_anchor(fname):
 #    conn.execute('import os')
 
 
-fname=mp3_to_anchor('c:/tmp/haka.mp3')
-print(fname)
def download_mp4(url):
    """Download the rendered anchor video at `url` to ./ai_spokesgirl.mp4.

    Returns:
        False when the server answers 404 (render not finished yet, caller polls).
        True once the file has been fully written.

    Raises:
        requests.HTTPError for any non-404 error status, instead of silently
        saving the error page as an mp4 (previous behavior).
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'}
    # Issue the request *before* opening the output file so a 404 poll does
    # not leave an empty/truncated ai_spokesgirl.mp4 behind.
    with requests.get(url, headers=headers, stream=True) as r:
        if r.status_code == 404:
            return False
        r.raise_for_status()  # fail loudly on 5xx/4xx instead of writing garbage
        with open('ai_spokesgirl.mp4', 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
    return True
+
+
if __name__ == '__main__':
    # Send the mp3 off for rendering, then poll the returned URL until the
    # finished video can be downloaded.
    fname = mp3_to_anchor('/Users/zooeytsai/kw_tools/openshot/input/movie_main/Q8.mp3')
    print(fname)
    time.sleep(240)  # initial grace period for the remote render
    while not download_mp4(fname):
        print('等待...')
        time.sleep(60)  # re-poll once a minute
 

+ 42 - 16
op_and_ed.py

@@ -1,10 +1,11 @@
 import openshot
 
def cKey(r, g, b, fuzz):
    """Return a ChromaKey effect keyed on the given RGB colour with `fuzz` tolerance."""
    key_colour = openshot.Color()
    key_colour.red = openshot.Keyframe(r)
    key_colour.green = openshot.Keyframe(g)
    key_colour.blue = openshot.Keyframe(b)
    return openshot.ChromaKey(key_colour, openshot.Keyframe(fuzz))
 
 
@@ -27,47 +28,72 @@ def video_photo_clip(video=None, layer=None, position=None, end=None
         clip.has_audio = openshot.Keyframe(0)
     return clip
 
+
 def video_writer_init(path):
     w = openshot.FFmpegWriter(path)
     w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
     w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
-        openshot.Fraction(1, 1), False, False, 3000000)
+                      openshot.Fraction(1, 1), False, False, 3000000)
     return w
 
 
def create_video(bg, title_bg, people, time, output_path="/app/output/Q-3.mp4", clip_end=20):
    """Compose a three-layer 1280x720/30fps clip and render it to `output_path`.

    Layers (bottom to top):
        2 — `title_bg`: title/template overlay, keeps its audio.
        3 — `bg`: background scene footage, scaled to 0.59, muted.
        4 — `people`: green-screen anchor footage, chroma-keyed, keeps audio.

    Args:
        bg: path of the background video or image.
        title_bg: path of the template overlay video.
        people: path of the green-screen anchor video.
        time: output duration in seconds; frame count = fps * time.
        output_path: rendered mp4 destination (default keeps the old
            hard-coded "/app/output/Q-3.mp4").
        clip_end: per-clip end time in seconds (default keeps the old
            hard-coded 20).
    """
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()

    # Chroma-key for the anchor footage (green, fuzz 320).
    ck_anchor = cKey(0, 255, 0, 320)

    # Background scene: layer 3, scaled down, no chroma key, muted.
    anchor_video = openshot.FFmpegReader(bg)
    anchor_video.Open()
    anchor_clip_video = video_photo_clip(video=anchor_video, layer=3, scale_x=0.59, scale_y=0.59,
                                         location_x=-0.04, location_y=-0.04, position=0,
                                         end=clip_end, ck=None, audio=False)
    t.AddClip(anchor_clip_video)
    anchor_video.Close()

    # Template overlay: layer 2, full frame, keeps its audio.
    anchor_template = openshot.FFmpegReader(title_bg)
    anchor_template.Open()
    anchor_clip_template = video_photo_clip(video=anchor_template, layer=2, scale_x=1, scale_y=1,
                                            location_x=0, location_y=0, position=0,
                                            end=clip_end, ck=None, audio=True)
    t.AddClip(anchor_clip_template)
    anchor_template.Close()

    # Anchor footage: top layer 4, chroma-keyed, positioned bottom-right.
    anchor_people = openshot.FFmpegReader(people)
    anchor_people.Open()
    anchor_clip_people = video_photo_clip(video=anchor_people, layer=4, scale_x=0.8, scale_y=0.8,
                                          location_x=0.38, location_y=0.35, position=0,
                                          end=clip_end, ck=ck_anchor, audio=True)
    t.AddClip(anchor_clip_people)
    anchor_people.Close()

    w = video_writer_init(output_path)
    w.Open()
    frames = t.info.fps.ToInt() * int(time)
    print('結果一', frames)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)

    t.Close()
    w.Close()
 
 
 if __name__ == '__main__':
-    create_video("/app/input/movie_main/LOGO_OP_4.mp4","/app/examples/主播示意1.mp4",4)
+    # data = pd.read_csv('/Users/zooeytsai/Documents/導盲犬協會.csv')
+    # for i in data:
+    #     if i['是否要場景'] == '是':
+    #         create_video(f"/app/input/movie_main/{i['素材']}", "/app/input/bg/串場樣板1.mp4", "/app/input/people/peggy1_1.mp4", f"/app/input/movie_main/{i['音檔']}")
+    #     else:
+    #         create_video(f"/app/input/movie_main/{i['素材']}", f"/app/input/movie_main/{i['音檔']}")
+    create_video("/app/input/movie_main/2022_04_15_165219.jpg", "/app/input/bg/樣板.mp4",
+                 "/app/input/people/nwfggznfiy.mp4",
+                 20)
+

+ 392 - 0
openshotExample3.py

@@ -0,0 +1,392 @@
+import openshot
+import re
+from PIL import Image,ImageDraw,ImageFont
+import pandas as pd
+import os
+import cv2
+import numpy as np
+import moviepy.editor as mp
+import time
+import pysrt
+import shutil
+import rpyc
+import random
+import string
+import requests
+from bs4 import BeautifulSoup
+
+
def cKey(r, g, b, fuzz):
    """Build an openshot ChromaKey effect from an RGB key colour and fuzz tolerance."""
    colour = openshot.Color()
    colour.red = openshot.Keyframe(r)
    colour.green = openshot.Keyframe(g)
    colour.blue = openshot.Keyframe(b)
    fuzz_kf = openshot.Keyframe(fuzz)
    return openshot.ChromaKey(colour, fuzz_kf)
+
def video_writer_init(path):
    """Return an FFmpegWriter configured for 1280x720 @ 30 fps H.264 + stereo AAC."""
    out = openshot.FFmpegWriter(path)
    out.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)  # 44.1 kHz stereo AAC, 3 Mbit/s
    out.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000),
                        1280, 720, openshot.Fraction(1, 1), False, False, 3000000)  # square pixels, 3 Mbit/s
    return out
+
def video_photo_clip(video=None, layer=None, position=None, end=None,
                     scale_x=1, scale_y=1, location_x=0, location_y=0, ck=None, audio=True):
    """Wrap an openshot Reader in a Clip with placement, scale and effects.

    Args:
        video: an opened openshot Reader (FFmpegReader/QtImageReader).
        layer: timeline layer (higher draws on top).
        position: start time on the timeline, in seconds.
        end: clip end time, in seconds.
        scale_x, scale_y: scale factors (1 = original size).
        location_x, location_y: normalized offsets from the frame origin.
        ck: optional ChromaKey effect to attach.
        audio: whether the clip's audio track is enabled.

    Returns:
        The configured openshot.Clip.
    """
    clip = openshot.Clip(video)
    clip.Layer(layer)
    clip.Position(position)
    clip.End(end)
    clip.scale_x = openshot.Keyframe(scale_x)
    clip.scale_y = openshot.Keyframe(scale_y)
    clip.location_x = openshot.Keyframe(location_x)
    clip.location_y = openshot.Keyframe(location_y)

    if ck is not None:  # idiomatic identity test (was: ck != None)
        clip.AddEffect(ck)
    # has_audio keyframe: 1 enables, 0 mutes (was an if/else on audio == True)
    clip.has_audio = openshot.Keyframe(1 if audio else 0)
    return clip
+
def trim_punctuation(s):
    """Collapse runs of non-CJK, non-alphanumeric characters into a single space.

    A punctuation run sandwiched between digits (e.g. the comma in "1,000")
    is kept intact so numbers survive the cleanup.
    """
    non_word = u'[^\u4e00-\u9fff0-9a-zA-Z]+'
    pattern = u'([0-9]+{0}[0-9]+)|{0}'.format(non_word)

    def _keep_number_or_space(m):
        # group(1) is the digits-punct-digits form; keep it verbatim.
        return m.group(1) if m.group(1) else u" "

    return re.sub(pattern, _keep_number_or_space, s)
+
+
def randomString(stringLength=10):
    """Return a random string of `stringLength` lowercase ASCII letters."""
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(stringLength))
+
+
def mp3_to_anchor(fname):
    """Upload a local media file to the render host over rpyc, kick off the
    remote anchor-video conversion, and return the URL of the future result.

    The returned URL is not guaranteed to exist yet — callers poll it with
    download_mp4() until the remote render finishes.
    """
    conn = rpyc.classic.connect("192.168.192.221", 18812)
    randname = randomString(10)   # remote upload name
    finalname = randomString(10)  # remote output name
    # Copy the local file to the remote /tmp. The local handle is closed via
    # `with` (previously leaked) and the remote handle closed even on error.
    with open(fname, 'rb') as fr:
        fw = conn.builtins.open('/tmp/' + randname + '.mp4', 'wb')
        try:
            fw.write(fr.read())
        finally:
            fw.close()
    # Trigger the remote conversion script.
    ros = conn.modules.os
    ros.system('/root/to_video/p9.sh ' + randname + ".mp4 " + finalname + ".mp4")
    return 'http://192.168.192.221/video/' + finalname + '.mp4'
+#    conn.execute('import os')
+
+
def download_mp4(url, name):
    """Download the rendered anchor video to input/bg/ai_spokesgirl_<name>.mp4.

    Returns:
        False on HTTP 404 (render not ready yet — caller keeps polling).
        True once the file is fully written.

    Raises:
        requests.HTTPError on any other error status, instead of saving the
        error page body as an mp4 and returning True (previous behavior).
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'}
    # Request first, open the file second: a 404 poll must not leave an
    # empty/partial output file on disk.
    with requests.get(url, headers=headers, stream=True) as r:
        if r.status_code == 404:
            return False
        r.raise_for_status()
        with open('input/bg/ai_spokesgirl_%s.mp4' % (name), 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
    return True
+
+
+                
+   
+ 
+
def text_to_short_vedio_create(title_ad="input/people/peggy1_1_long.mp4",op="input/bg/LOGO_OP_4.mp4",ed="input/bg/LOGO_ED.mp4",bg="input/bg/串場樣板1_long.mp4"):
    """Assemble the full promo video: opening clip + one segment per CSV row + ending clip.

    Reads the production sheet 導盲犬協會.csv row by row. Each row contributes
    its audio (音檔 column) and its visual material (素材 column, mp4 or jpg).
    When the 是否要場景 ('needs scene?') column is 是 ('yes'), the row's audio is
    rendered to a temp video, sent to the AI-anchor service via mp3_to_anchor(),
    polled with download_mp4() until ready, and the chroma-keyed anchor plus the
    scene template `bg` are overlaid. The result is rendered to output/test6.mp4.

    Args:
        title_ad: anchor footage path — NOTE(review): currently unused in the body.
        op: opening clip path (placed first, layer 2).
        ed: ending clip path (placed last, layer 2).
        bg: scene-template overlay used for rows that need a scene.
    """
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()

    # Chroma-key (green-screen removal) parameters; ck_anchor=None means
    # most clips are added without keying — only the AI-anchor overlay uses ck.
    # ck=cKey(0,254,0,270)
    ck=cKey(0,255,0,320)
    ck_anchor=None
    # Timeline cursor: the second at which the next segment starts.
    time_= 0

    csv_use=pd.read_csv("導盲犬協會.csv")
    csv_use=csv_use.dropna(how='all')
    csv_use.reset_index(inplace=True)

    # --- opening clip ---
    anchor_op = openshot.FFmpegReader(op)
    anchor_op.Open()
    anchor_clip_op = video_photo_clip(video=anchor_op,layer=2,scale_x=1,scale_y=1,
            location_x=0,location_y=0,position=time_, end=anchor_op.info.duration,ck=ck_anchor,audio=True)

    t.AddClip(anchor_clip_op)
    anchor_op.Close()
    time_+=anchor_op.info.duration

    # --- one segment per CSV row ---
    for i in range(len(csv_use)):
    # for i in range(3):
        # 音檔 column: this row's audio file name.
        pwd_=str(csv_use.loc[i,['音檔']].values[0])

        # NOTE(review): fabricating variable names through locals()['...'] works
        # here under CPython but is not guaranteed by the language; a plain dict
        # would be the safe equivalent. Kept as-is.
        # Audio clip for this row (scale 0 => sound only, no visible frame).
        locals()['anchor_music'+str(i)] = openshot.FFmpegReader("導盲犬協會影片素材/"+pwd_)
        locals()['anchor_music'+str(i)].Open()
        locals()['anchor_music'+str(i)+'clip'] = video_photo_clip(video=locals()['anchor_music'+str(i)],layer=3,scale_x=0,scale_y=0,
                   location_x=0,location_y=0,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck_anchor,audio=True)
        t.AddClip(locals()['anchor_music'+str(i)+'clip'])
        locals()['anchor_music'+str(i)].Close()

        if str(csv_use.loc[i,['是否要場景']].values[0])=="是":
            # Row wants an AI-anchor scene: material is scaled down to leave
            # room for the anchor overlay.
            scale_x_use = 0.59
            scale_y_use = 0.59
            # Render the row's audio alone on a private timeline to a temp mp4.
            t1 = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
            t1.Open()
            pwd_=str(csv_use.loc[i,['音檔']].values[0])

            locals()['anchor_music_t'+str(i)] = openshot.FFmpegReader("導盲犬協會影片素材/"+pwd_)
            locals()['anchor_music_t'+str(i)].Open()
            locals()['anchor_music_t'+str(i)+'clip'] = video_photo_clip(video=locals()['anchor_music_t'+str(i)],layer=3,scale_x=0,scale_y=0,
                   location_x=0,location_y=0,position=0, end=locals()['anchor_music_t'+str(i)].info.duration,ck=ck_anchor,audio=True)
            t1.AddClip(locals()['anchor_music_t'+str(i)+'clip'])
            locals()['anchor_music_t'+str(i)].Close()

            w = video_writer_init("input/people/%s.mp4"%('anchor_music'+str(i)))
            w.Open()
            frames = int(t1.info.fps)*int(locals()['anchor_music_t'+str(i)].info.duration)
            for n in range(frames):
                f=t1.GetFrame(n)
                w.WriteFrame(f)
            w.Close()
            t1.Close()

            # Send the temp mp4 to the AI-anchor render host, then poll until
            # download_mp4 stores input/bg/ai_spokesgirl_<i>.mp4.
            fname=mp3_to_anchor("input/people/%s.mp4"%('anchor_music'+str(i)))
            # print(fname)
            time.sleep(180)
            while True:
                result = download_mp4(fname,str(i))
                if result:
                    break
                print('等待...')
                time.sleep(60)

        else:
            # No scene: material fills the frame.
            scale_x_use = 1
            scale_y_use = 1

        # 素材 column: visual material; dispatch on its file extension.
        choose=str(csv_use.loc[i,['素材']].values[0]).split(".")[-1]
        pwd_p1=str(csv_use.loc[i,['素材']].values[0])

        if choose == 'mp4':
            # Video material on layer 4, muted (row audio is already placed).
            locals()['anchor'+str(i)] = openshot.FFmpegReader("導盲犬協會影片素材/"+pwd_p1)
            locals()['anchor'+str(i)].Open()
            locals()['anchor'+str(i)+'clip'] = video_photo_clip(video=locals()['anchor'+str(i)],layer=4,scale_x=scale_x_use,scale_y=scale_y_use,
               location_x=-0.04,location_y=-0.04,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck_anchor,audio=False)

            t.AddClip(locals()['anchor'+str(i)+'clip'])
            locals()['anchor'+str(i)].Close()

            if str(csv_use.loc[i,['是否要場景']].values[0])=="是":
                # Chroma-keyed AI-anchor overlay (layer 6) downloaded above.
                locals()['anchor_ad'+str(i)] = openshot.FFmpegReader('input/bg/ai_spokesgirl_%s.mp4'%(str(i)))
                locals()['anchor_ad'+str(i)].Open()
                locals()['anchor_clip_ad'+str(i)] = video_photo_clip(video=locals()['anchor_ad'+str(i)],layer=6,scale_x=0.8,scale_y=0.8,
                   location_x=0.38,location_y=0.35,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck,audio=False)
                t.AddClip(locals()['anchor_clip_ad'+str(i)])
                locals()['anchor_ad'+str(i)].Close()

                # Scene-template background (layer 2) behind the material.
                locals()['anchor_bg'+str(i)] = openshot.FFmpegReader(bg)
                locals()['anchor_bg'+str(i)].Open()
                locals()['anchor_clip_bg'+str(i)] = video_photo_clip(video=locals()['anchor_bg'+str(i)],layer=2,scale_x=1,scale_y=1,
                   location_x=0,location_y=0,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck_anchor,audio=False)
                t.AddClip(locals()['anchor_clip_bg'+str(i)])
                locals()['anchor_bg'+str(i)].Close()

        elif choose == 'jpg':
            # Still-image material: same layout as the mp4 branch but read
            # through QtImageReader.
            locals()['anchor'+str(i)] = openshot.QtImageReader("導盲犬協會影片素材/"+pwd_p1)
            locals()['anchor'+str(i)].Open()
            locals()['anchor'+str(i)+'clip'] = video_photo_clip(video=locals()['anchor'+str(i)],layer=4,scale_x=scale_x_use,scale_y=scale_y_use,
               location_x=-0.04,location_y=-0.04,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck_anchor,audio=False)
            t.AddClip(locals()['anchor'+str(i)+'clip'])
            locals()['anchor'+str(i)].Close()
            if str(csv_use.loc[i,['是否要場景']].values[0])=="是":
                locals()['anchor_ad'+str(i)] = openshot.FFmpegReader("input/bg/ai_spokesgirl_%s.mp4"%(str(i)))
                locals()['anchor_ad'+str(i)].Open()
                locals()['anchor_clip_ad'+str(i)] = video_photo_clip(video=locals()['anchor_ad'+str(i)],layer=6,scale_x=0.8,scale_y=0.8,
                   location_x=0.38,location_y=0.35,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck,audio=False)
                t.AddClip(locals()['anchor_clip_ad'+str(i)])
                locals()['anchor_ad'+str(i)].Close()

                locals()['anchor_bg'+str(i)] = openshot.FFmpegReader(bg)
                locals()['anchor_bg'+str(i)].Open()
                locals()['anchor_clip_bg'+str(i)] = video_photo_clip(video=locals()['anchor_bg'+str(i)],layer=2,scale_x=1,scale_y=1,
                   location_x=0,location_y=0,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck_anchor,audio=False)
                t.AddClip(locals()['anchor_clip_bg'+str(i)])
                locals()['anchor_bg'+str(i)].Close()

        # Advance the cursor by this row's audio duration.
        time_+=locals()['anchor_music'+str(i)].info.duration

    # --- ending clip ---
    anchor_ed = openshot.FFmpegReader(ed)
    anchor_ed.Open()
    anchor_clip_ed = video_photo_clip(video=anchor_ed,layer=2,scale_x=1,scale_y=1,
            location_x=0,location_y=0,position=time_, end=anchor_ed.info.duration,ck=ck_anchor,audio=True)
    time_+=anchor_ed.info.duration
    t.AddClip(anchor_clip_ed)
    anchor_ed.Close()

    # --- render the assembled timeline ---
    w = video_writer_init("output/test6.mp4")
    w.Open()

    # NOTE(review): int(t.info.fps) on an openshot Fraction — other functions in
    # this project use t.info.fps.ToInt(); confirm both give the same value.
    frames = int(t.info.fps)*int(time_)
    for n in range(frames):
        f=t.GetFrame(n)
        w.WriteFrame(f)

    t.Close()
    w.Close()
# Render subtitle text onto a green (chroma-key) canvas and save it as a PNG.
def txt2image(content, save_target, lang='zh', size=26, fon="font/DFT_B7.ttc"):
    """Draw `content` centred on a 1280x500 pure-green image and save as PNG.

    The green background (#00FF00) is keyed out later when the PNG is overlaid
    on the video. At most two lines (split on a newline) are rendered —
    additional lines are ignored, as before.

    Args:
        content: subtitle text; may contain one newline for a two-line subtitle.
        save_target: output PNG path.
        lang: 'zh' uses the `fon` typeface, anything else uses font/arial.ttf.
        size: font size in points.
        fon: path of the CJK font file.
    """
    # Fixed: the original computed trim_punctuation(content) into an unused
    # local; the cleaned text was never drawn, so the dead call is removed.
    if lang == 'zh':
        font = ImageFont.truetype(font=fon, size=size)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=size)

    W, H = (1280, 500)
    canvas = Image.new('RGB', (W, H), "#00FF00")  # green => keyed out downstream
    draw = ImageDraw.Draw(canvas)

    # NOTE(review): draw.textsize was removed in Pillow 10; switch to
    # draw.textbbox/textlength if Pillow is ever upgraded.
    text = content
    if "\n" in text:
        # First line centred at the top of the canvas.
        w, h = draw.textsize(text.split("\n")[0], font=font)
        text_border(draw, (W-w)/2, 0, text.split("\n")[0], font, 'black', 'white')
        # Second line placed just below (offset by its own height + 2px).
        w, h = draw.textsize(text.split("\n")[1], font=font)
        text_border(draw, (W-w)/2, h+2, text.split("\n")[1], font, 'black', 'white')
    else:
        w, h = draw.textsize(content, font=font)
        text_border(draw, (W-w)/2, 0, text, font, 'black', 'white')
    canvas.save(save_target, "PNG")
+
def text_border(draw, x, y, text, font, shadowcolor, fillcolor):
    """Draw outlined text: shadow copies at every 1px and 2px-corner offset,
    then the fill-coloured text on top."""
    # Same twelve shadow positions as before, in the same draw order:
    # the four 1px orthogonal offsets, the four 1px diagonals, then the
    # four 2px corner offsets for a thicker border.
    offsets = [
        (-1, 0), (1, 0), (0, -1), (0, 1),
        (-1, 1), (1, -1), (-1, -1), (1, 1),
        (-2, -2), (2, -2), (-2, 2), (2, 2),
    ]
    for dx, dy in offsets:
        draw.text((x + dx, y + dy), text, font=font, fill=shadowcolor)

    # Finally the actual text over the shadow.
    draw.text((x, y), text, font=font, fill=fillcolor)
+
def text_to_short_vedio(mp4_file = "input/example/test3.mp4",sound_file = None
                        ,vedio_time = 30,output_filename="output/demo.mp4",text_font = "font/DFT_B7.ttc"):
    """Burn auto-generated subtitles into mp4_file and render to output_filename.

    Builds a timeline with the source video on layer 2, generates (or reuses)
    an .srt transcript from `sound_file`, renders each subtitle line to a
    green-background PNG via txt2image(), overlays the PNGs chroma-keyed on
    layer 4 at the subtitle timestamps, then writes `vedio_time` seconds of
    output and clears the tmp/ working directory.

    Args:
        mp4_file: source video path.
        sound_file: audio (or .srt) path used for the transcript.
            NOTE(review): the default None crashes at `".srt" in sound_file`
            below — callers must always pass a value.
        vedio_time: output duration in seconds.
        output_filename: rendered mp4 destination.
        text_font: CJK font used for the subtitles.
    """
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()

    # Chroma-key (green-screen) parameters; only ck_anchor is used below.
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 0, 320)

    # Source video, full frame on layer 2.
    anchor = openshot.FFmpegReader(mp4_file)
    anchor.Open()
    anchor_clip = video_photo_clip(video=anchor,layer=2,scale_x=1,scale_y=1,
            location_x=0,location_y=0,position=0, end=vedio_time,audio=True)
    t.AddClip(anchor_clip)
    anchor.Close()
    number = 0  # running index for the per-subtitle PNGs / clip names

    sound_srt_file = ""
    # Auto-generate an .srt transcript with the autosub CLI, unless an .srt
    # path was passed in directly.
    if ".srt" in sound_file:
        sound_srt_file = sound_file
    elif not sound_file is None:
        cmd = "autosub -S zh-TW -D zh-TW " + sound_file
        os.system(cmd)
        sound_srt_file = sound_file.split('.')[0] + ".srt"


    # Open the srt file and overlay one chroma-keyed PNG per subtitle.
    try:
        subs = pysrt.open(sound_srt_file)
        text_form = []
        for context in subs:
            #print(context.start.minutes*60+context.start.seconds+ 0.001*context.start.milliseconds)
            # Convert pysrt times to float seconds (duration and absolute start).
            end = context.end-context.start
            end_timeStamp=(end.minutes*60+end.seconds+ 0.001*end.milliseconds)
            start_timeStamp=(context.start.minutes*60+context.start.seconds+ 0.001*context.start.milliseconds)
            text_form.append({'text':context.text,'start':start_timeStamp,'end':end_timeStamp,'size':36,'font':text_font})

        for text_tmp in text_form:
            file_name = "tmp/save_target_" + str(number) + ".png"
            txt2image(text_tmp['text'], file_name,lang='zh',size = text_tmp['size'],fon = text_tmp['font'])
            # NOTE(review): exec() is used to fabricate numbered variable names;
            # a list of clips would be the safe equivalent. Kept as-is.
            exec('text_anchor_{} = openshot.QtImageReader("tmp/save_target_{}.png")'.format(number,number))
            exec('text_anchor_{}.Open()'.format(number))
            # NOTE(review): Open() is called twice here — likely a copy/paste
            # duplicate; harmless but worth confirming.
            exec('text_anchor_{}.Open()'.format(number))
            exec('text_anchor_clip_{} = video_photo_clip(video=text_anchor_{},layer=4,scale_x=1,scale_y=1,\
                    location_x=0,location_y=0.67,position=text_tmp["start"], end=text_tmp["end"],ck=ck_anchor,audio=True)'.format(number,number))
            exec('t.AddClip(text_anchor_clip_{})'.format(number))
            exec('text_anchor_{}.Close()'.format(number))
            number = number+1
    except:
        # NOTE(review): bare except swallows every error (including typos in the
        # exec strings); narrowing to Exception with logging would be safer.
        print("無法開啟srt檔案(字幕產生失敗)")

    # Render the subtitled timeline.
    w = video_writer_init(output_filename)
    w.Open()

    frames = int(t.info.fps)*int(vedio_time)
    for n in range(frames):
        f=t.GetFrame(n)
        w.WriteFrame(f)

    t.Close()
    w.Close()

    # Delete the temporary subtitle PNGs and recreate the empty tmp/ dir.
    shutil.rmtree('tmp')
    os.mkdir('tmp')
+
+
if __name__ == '__main__':
    # Fixed: the original mixed tab-indented lines with a space-indented
    # print(), which raises TabError/IndentationError at parse time.
    # picture_change()
    text_to_short_vedio_create()
    text_to_short_vedio(mp4_file="output/test6.mp4", sound_file='output/test6.mp4',
                        vedio_time=284, text_font="font/DFT_B7.ttc")
    print("down")