main.py

import math
import os
import time
from typing import Optional

import openshot
from fastapi import FastAPI, File, UploadFile, Request, Response
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel

import util

templates = Jinja2Templates(directory="static")
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")

rootPath = '/app/components/'


@app.get("/")
def read_root(request: Request, response: Response):
    return templates.TemplateResponse("uploadmp3.html", {"request": request, "response": response})


@app.get("/modifyScript")
def read_modify_script(request: Request, response: Response):
    return templates.TemplateResponse("modifyScript.html", {"request": request, "response": response})


@app.get("/items/{item_id}")
def read_item(item_id: int, q: Optional[str] = None):
    return {"item_id": item_id, "q": q}


class updateScriptModel(BaseModel):
    name_hash: str
    scriptStr: str


@app.post("/updateScript")
def update_script(info: updateScriptModel):
    # Rewrite the stored script with the user's edits, then rebuild the assets and the video.
    lines = info.scriptStr.split(',')
    cPath = rootPath + info.name_hash + '/'
    util.rewriteScript(cPath, lines)
    sub_dict, img_dict = filePrepare(info.name_hash)
    genVideo(info.name_hash, sub_dict, img_dict)
    return 'ok'
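
# Request-body sketch for /updateScript (field names come from updateScriptModel;
# the example values are hypothetical): name_hash is the id returned by /uploadmp3/
# and scriptStr is the edited script joined with commas, e.g.
#   {"name_hash": "16930001234567", "scriptStr": "first line,second line"}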


@app.post("/uploadmp3/")
async def uploadmp3(file: UploadFile = File(...)):
    # Use the upload timestamp (dots stripped) as the working-directory name for this job.
    name_hash = str(time.time()).replace('.', '')
    cPath = rootPath + name_hash + '/'
    try:
        os.mkdir(cPath)
    except FileExistsError:
        pass
    # Save the uploaded audio, then build and return the script for it.
    with open(cPath + 'speech.mp3', "wb+") as file_object:
        file_object.write(file.file.read())
    util.transScript(cPath)
    scripts = util.get_script(cPath)
    return name_hash, scripts


def filePrepare(name_hash):
    # Generate subtitle images, download the referenced pictures, and render the anchor clip.
    cPath = rootPath + name_hash + '/'
    try:
        os.mkdir(cPath)
    except FileExistsError:
        pass
    sub_dict, img_dict = util.parse_script("script.txt")
    util.generate_subtitle_image_from_dict(cPath, sub_dict)
    for imgd in img_dict:
        print(imgd)
        util.downloadFromDrive(cPath, imgd['imgid'], imgd['index'])
    util.call_anchor(cPath + 'speech.mp3', 7)
    return sub_dict, img_dict


def genVideo(name_hash, sub_dict, img_dict):
    basicPath = rootPath + 'basic/'
    cPath = rootPath + name_hash + '/'
    ck = util.cKey(0, 254, 0, 270)
    ck_anchor = util.cKey(0, 255, 1, 320)
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()
    main_timer = 0

    # Opening logo
    LOGO_OP = openshot.FFmpegReader(basicPath + "LOGO_OP_4.mp4")
    LOGO_OP.Open()  # Open the reader
    head_duration = LOGO_OP.info.duration
    LOGO_OP_clip = util.video_photo_clip(vid=LOGO_OP, layer=4, position=0, end=head_duration)
    t.AddClip(LOGO_OP_clip)
    main_timer += head_duration

    # Chroma-keyed anchor video (muted) plus the speech audio track
    anchor = openshot.FFmpegReader(cPath + "/speaker.mp4")
    anchor.Open()
    anchor_clip = util.video_photo_clip(vid=anchor, layer=4, scale_x=0.65, scale_y=0.65,
                                        location_x=0.35, location_y=0.25, position=main_timer,
                                        end=anchor.info.duration, ck=ck_anchor, audio=False)
    t.AddClip(anchor_clip)
    speech = openshot.FFmpegReader(cPath + "/speech.mp3")
    speech.Open()
    speech_clip = openshot.Clip(speech)
    speech_clip.Position(main_timer)
    speech_clip.End(anchor.info.duration)
    t.AddClip(speech_clip)
    main_timer += anchor.info.duration
    anchor.Close()
    speech.Close()

    # Subtitle overlays, offset by the logo intro
    sub_img_list = [None] * len(sub_dict)
    sub_clip_list = [None] * len(sub_dict)
    for sub_obj in sub_dict:
        idx = int(sub_obj['index'])
        sub_img_list[idx] = openshot.QtImageReader(cPath + str(idx) + '.png')
        sub_img_list[idx].Open()
        sub_clip_list[idx] = util.video_photo_clip(vid=sub_img_list[idx], layer=5, location_x=0.069,
                                                   location_y=0.89, position=head_duration + sub_obj['start'],
                                                   end=sub_obj['duration'])
        t.AddClip(sub_clip_list[idx])
        sub_img_list[idx].Close()

    # Illustration images: try .jpg first, fall back to .png
    img_list = [None] * len(img_dict)
    img_clip_list = [None] * len(img_dict)
    for img_d in img_dict:
        idx = int(img_d['index'])
        print(cPath + str(idx) + 'img.jpg')
        try:
            img_list[idx] = openshot.QtImageReader(cPath + str(idx) + 'img.jpg')
            img_list[idx].Open()
        except Exception:
            img_list[idx] = openshot.QtImageReader(cPath + str(idx) + 'img.png')
            img_list[idx].Open()
        img_clip_list[idx] = util.video_photo_clip(vid=img_list[idx], layer=3,
                                                   position=head_duration + img_d['start'],
                                                   end=img_d['duration'])
        t.AddClip(img_clip_list[idx])
        img_list[idx].Close()

    # Render the timeline frame by frame
    w = util.video_writer_init("myraw.mp4")
    w.Open()
    frames = int(t.info.fps) * int(main_timer)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()
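

# Local run sketch (assumes uvicorn is installed alongside FastAPI and that the
# static/ directory and /app/components/ exist on the host):
#
#   uvicorn main:app --host 0.0.0.0 --port 8000
#
# Example upload (the file name here is hypothetical; the endpoint expects a
# multipart "file" field and responds with [name_hash, scripts]):
#
#   curl -F "file=@speech.mp3" http://localhost:8000/uploadmp3/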