Bläddra i källkod

add fb youtube tools

ming 3 år sedan
incheckning
dd41186eae

+ 17 - 0
crawl/YTcrawl.py

@@ -0,0 +1,17 @@
import csv
from youtube_search import YoutubeSearch

# Search YouTube for the query (Chinese: "micro film") and dump the
# channel / title / view-count of each hit into a CSV report.
results = YoutubeSearch('微電影', max_results=100).to_dict()

# Guard: an empty result list would make results[0] raise IndexError.
if results:
    print(results[0].keys())
print(len(results))

with open('youtubeReport.csv', 'w', newline='', encoding='UTF-8') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['channel', 'title', 'views'])

    for r in results:
        print([r['channel'], r['title'], r['views']])
        writer.writerow([r['channel'], r['title'], r['views']])

+ 71 - 0
crawl/list_channel_YTcrawl - 複製.py

@@ -0,0 +1,71 @@
+
import csv

from youtubesearchpython import *
from tqdm import tqdm
import time

pages = 50
max_channels = 8 * pages


def _parse_subscriber_count(text):
    """Convert a subscriber string such as '1.2K subscribers' to an int.

    YouTube renders counts with a K/M suffix and up to two decimal digits.
    The original inline K-branch assumed exactly one decimal digit
    ('1.23K' became 12300); this helper scales correctly for any number
    of decimals, mirroring how the M-branch already worked.
    """
    value = text.replace(' subscribers', '').replace(' subscriber', '')
    for suffix, zeros in (('K', 3), ('M', 6)):
        if suffix in value:
            value = value.replace(suffix, '')
            decimals = 0
            if '.' in value:
                decimals = len(value) - value.index('.') - 1
                value = value.replace('.', '')
            value += '0' * (zeros - decimals)
            break
    return int(value)


search = ChannelsSearch('微電影', limit=8)
c_list = []
print('Filtering Channels................')
for p in tqdm(range(pages)):
    for channel in search.result()['result']:
        # Some channels report no subscriber count at all; skip them.
        if channel['subscribers'] is None:
            continue
        # Keep only channels with more than 100 subscribers.
        if _parse_subscriber_count(channel['subscribers']) > 100:
            c_list.append(channel)
    search.next()


print('There is ', len(c_list), ' channels')

v_list = []


for ch in c_list:
    channel_id = ch['id']
    try:
        search = ChannelSearch('*', channel_id)
        result = search.result()['result']
        for r in result:
            print(r.keys())
            print(r['channel']['name'])
    except Exception:
        # Best-effort: some channels cannot be searched; log the id and
        # move on rather than aborting the whole crawl.
        print(channel_id)

+ 110 - 0
crawl/official.py

@@ -0,0 +1,110 @@
import urllib.request  # for calling the YouTube Data API over HTTP
import json  # for decoding the JSON API responses


def get_infoString(ChannelIdentifier):
    """Fetch per-video metadata for every listed video of a YouTube channel.

    Parameters
    ----------
    ChannelIdentifier : str
        Channel id in the "UC..." form accepted by the YouTube Data API v3.

    Returns
    -------
    list[list]
        One row per video:
        [channelTitle, subscriberCount, description, title, publishedAt,
         viewCount, likeCount, favoriteCount, commentCount].
        likeCount/commentCount default to 0 when the uploader hides them.

    Relies on `tqdm` being imported at module level (it is, further down
    this file, before this function is first called).
    """
    # SECURITY: this API key is committed in plain text. It should be
    # revoked and read from an environment variable or config instead.
    API_KEY = 'AIzaSyDuwkgFVRLOa3gkBU4aeDjVBuogLQ1ZZXE'

    # Channel statistics call — we only need the subscriber count.
    ch_url = ('https://www.googleapis.com/youtube/v3/channels'
              '?part=statistics&id=' + ChannelIdentifier + '&key=' + API_KEY)
    ch_info = json.load(urllib.request.urlopen(ch_url))
    subscribes = ch_info['items'][0]['statistics']['subscriberCount']

    # Search for the channel's videos.
    # NOTE: the API caps maxResults at 50; requesting 150 is clamped
    # server-side, so at most 50 video ids come back per call.
    url = ('https://www.googleapis.com/youtube/v3/search'
           '?part=snippet&channelId=' + ChannelIdentifier
           + '&maxResults=150&type=video&key=' + API_KEY)
    videos = json.load(urllib.request.urlopen(url))

    # Collect the ids of actual videos (the search can also return
    # channel/playlist kinds).
    video_ids = [v['id']['videoId'] for v in videos['items']
                 if v['id']['kind'] == 'youtube#video']

    # Per-video details. The API structure can be explored here:
    # https://developers.google.com/apis-explorer/#p/youtube/v3/youtube.videos.list
    vlist = []
    for video_id in tqdm(video_ids, leave=False):
        detail_url = ('https://www.googleapis.com/youtube/v3/videos'
                      '?part=snippet%2CcontentDetails%2Cstatistics'
                      '&id=' + video_id + '&key=' + API_KEY)
        details = json.load(urllib.request.urlopen(detail_url))
        for video in details['items']:
            if video['kind'] != 'youtube#video':
                continue
            snippet = video['snippet']
            stats = video['statistics']
            # likeCount / commentCount are absent when hidden by the
            # uploader — default them to 0 instead of a bare except.
            vlist.append([
                snippet['channelTitle'],
                subscribes,
                snippet['description'],
                snippet['title'],
                snippet['publishedAt'],
                stats['viewCount'],
                stats.get('likeCount', 0),
                stats['favoriteCount'],
                stats.get('commentCount', 0),
            ])
    return vlist
+
+
import csv

from youtubesearchpython import *
from tqdm import tqdm
import time

pages = 100
max_channels = 8 * pages


def _parse_subscriber_count(text):
    """Convert '1.2K subscribers' / '3.45M' style strings to an int.

    Scales correctly for any number of decimal digits (the previous
    inline K-branch assumed exactly one, turning '1.23K' into 12300).
    """
    value = text.replace(' subscribers', '').replace(' subscriber', '')
    for suffix, zeros in (('K', 3), ('M', 6)):
        if suffix in value:
            value = value.replace(suffix, '')
            decimals = 0
            if '.' in value:
                decimals = len(value) - value.index('.') - 1
                value = value.replace('.', '')
            value += '0' * (zeros - decimals)
            break
    return int(value)


search = ChannelsSearch('科技', limit=8)
c_list = []
print('Filtering Channels................')
for p in tqdm(range(pages)):
    for channel in search.result()['result']:
        # Channels without a published subscriber count are skipped.
        if channel['subscribers'] is None:
            continue
        # Keep only channels with more than 500 subscribers.
        if _parse_subscriber_count(channel['subscribers']) > 500:
            c_list.append(channel)
    search.next()


with open('youtubeReport.csv', 'w', newline='', encoding='UTF-8') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['channelTitle', 'subscribes', 'description', 'videoTitle',
                     'publishedAt', 'viewCount', 'likeCount', 'favoriteCount',
                     'commentCount'])

    for ch in tqdm(c_list):
        vlist = get_infoString(ch['id'])
        for v in vlist:
            # BUGFIX: the header has 9 columns but only v[0]..v[7] were
            # written before, silently dropping commentCount.
            writer.writerow(v)

Filskillnaden har hållts tillbaka eftersom den är för stor
+ 17442 - 0
crawl/youtubeReport.csv


BIN
crawl/youtube_search-fork-master.zip


+ 26 - 0
crawl/youtube_search-fork-master/youtube_search-fork-master/.circleci/config.yml

@@ -0,0 +1,26 @@
+# Python CircleCI 2.0 configuration file
+version: 2
+jobs:
+  build:
+    docker:
+      - image: circleci/python:3.8
+
+    working_directory: ~/repo
+
+    steps:
+      # Step 1: obtain repo from GitHub
+      - checkout
+      # Step 2: create virtual env and install dependencies
+      - run:
+          name: install dependencies
+          command: |
+            python3 -m venv venv
+            . venv/bin/activate
+            pip install -r requirements.txt
+      # Step 3: run linter and tests
+      - run:
+          name: run tests
+          command: |
+            . venv/bin/activate
+            pip install pytest pytest-cov
+            pytest -v --cov=youtube_search

+ 107 - 0
crawl/youtube_search-fork-master/youtube_search-fork-master/.gitignore

@@ -0,0 +1,107 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+# vscode
+.vscode/

+ 21 - 0
crawl/youtube_search-fork-master/youtube_search-fork-master/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 joe tats
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 70 - 0
crawl/youtube_search-fork-master/youtube_search-fork-master/README.md

@@ -0,0 +1,70 @@
+[![Generic badge](https://img.shields.io/badge/PyPi-1.2.1-green.svg)](https://pypi.org/project/youtube-search-fork/)
+[![Generic badge](https://img.shields.io/badge/Maintained-NO-red.svg)](https://pypi.org/project/youtube-search-fork/)
+
+
+Check out the new version [here](https://github.com/ytorg/yotter-api)
+
+
+# youtube_search-fork
+This fork adds the ability to search for channels, the latest content of a channel, channel information, and channel videos, and adds video published dates and channelId.
+It works by scraping YouTube pages.
+
+- [x] Search on YouTube while avoiding the use of their heavily rate-limited API. 
+- [x] No need for Google account.
+- [x] No limits.
+- [x] Reasonably fast.
+
+## Installation
+`pip install youtube-search-fork`
+
+## Example Usage
+For a basic search (and all of the current functionality), you can use the search tool as follows:
+
+* Get Youtube channel videos and channel info and parse it to JSON.
+```python
+from youtube_search import YoutubeSearch
+
+videos = YoutubeSearch('search terms', max_results=10).videos_to_json()
+channels = YoutubeSearch('search terms', max_results=10).channels_to_json()
+```
+> This will get all the videos from the search query into `videos` and all the found channels into `channels` and parse it in a JSON format.
+
+
+* Get Youtube search videos and channels and parse it to a dict.
+```python
+from youtube_search import YoutubeSearch
+
+videos = YoutubeSearch('search terms', max_results=10).videos_to_dict()
+channels = YoutubeSearch('search terms', max_results=10).channels_to_dict()
+```
+> This will get all the videos from the search query into `videos` and all the found channels into `channels` and parse it in a Python dictionary.
+
+
+* Get a specific channel info and videos:
+```python
+from youtube_search import YoutubeSearch
+data = YoutubeSearch.channelInfo(channel_id)
+
+
+channelInfo = data[0]
+channelVideoList = data[1]
+```
+> This will get all the videos and their data from the specified channel (channelID needed) into `channelVideoList` and the channel data (subscribers, username, etc) into `channelInfo`.
+
+
+* You can request **only** the channel info:
+```python
+from youtube_search import YoutubeSearch
+
+# The second (and optional) parameter is `includeVideos`. By default it is set to True.
+channelOnlyData = YoutubeSearch.channelInfo(channel_id, False)
+channelInfo = channelOnlyData[0]
+```
+> Same as before, but just the channel data.
+
+## Formats
+* Channel info format:
+```{'id': 'UCjr2bPAyPV.....8Q', 'name': 'Channel Name', 'avatar': 'https://yt3.ggpht.com/a/AATXAJzuPoT_2M54dus-P2qXgnbY0MPxbkzvwv3muxQn=s176-c-k-c0x00ffffff-no-rj', 'subCount': '24K'}```
+
+* Video list format:
+```[{'videoTitle': 'Video title goes here', 'id': 'video_id_here', 'channelName': 'Channel Name', 'timeStamp': '17 hours ago', 'views': '13,661 views', 'videoThumb': 'https://i.ytimg.com/vi/3eC4Hp4MNBA/hqdefault.jpg?sqp=-oaymwEiCKgBEF5IWvKriqkDFQgBFQAAAAAYASUAAMhCPQCAokN4AQ==&rs=AOn4....5o_2mazZd40g_xc_3917M5w', 'channelUrl': '/channel/UCjr2bPA.......gT3W8Q'}, {...}, {...}, ...]```

+ 2 - 0
crawl/youtube_search-fork-master/youtube_search-fork-master/__init__.py

@@ -0,0 +1,2 @@
+# Version
+__version__ = "1.1.0"

BIN
crawl/youtube_search-fork-master/youtube_search-fork-master/requirements.txt


+ 30 - 0
crawl/youtube_search-fork-master/youtube_search-fork-master/setup.py

@@ -0,0 +1,30 @@
# Packaging script for the youtube-search-fork PyPI distribution.
import pathlib
from setuptools import setup

# The directory containing this file
HERE = pathlib.Path(__file__).parent

# The text of the README file, used verbatim as the PyPI long description.
README = (HERE / "README.md").read_text()

# This call to setup() does all the work
# NOTE(review): version here (1.2.5) differs from the package's
# __init__.py __version__ ("1.1.0") — confirm which is authoritative.
setup(
    name="youtube-search-fork",
    version="1.2.5",
    description="Search on youtube avoiding the use their heavily rate-limited API. Fork of original youtube-search by joetats",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/pluja/youtube_search-fork",
    author="Pluja",
    author_email="pluja@r3d.red",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    packages=["youtube_search"],
    include_package_data=True,
    install_requires=["requests"],
)

+ 22 - 0
crawl/youtube_search-fork-master/youtube_search-fork-master/test_search.py

@@ -0,0 +1,22 @@
+from .youtube_search import YoutubeSearch
+
+
class TestSearch:
    """Smoke tests for YoutubeSearch.

    NOTE: these hit the live YouTube site over the network, so they can
    fail for reasons unrelated to the code (rate limiting, page changes).
    """

    def test_init_defaults(self):
        # With max_results=None, everything found on the page is kept.
        search = YoutubeSearch('test')
        assert search.max_results is None
        assert 1 <= len(search.videos)

    def test_init_max_results(self):
        # max_results caps the number of returned videos exactly.
        search = YoutubeSearch('test', max_results=10)
        assert 10 == search.max_results
        assert 10 == len(search.videos)

    def test_dict(self):
        # videos_to_dict returns the raw list of result dicts.
        search = YoutubeSearch('test', max_results=10)
        assert isinstance(search.videos_to_dict(), list)

    def test_json(self):
        # videos_to_json returns a JSON-encoded string.
        search = YoutubeSearch('test', max_results=10)
        assert isinstance(search.videos_to_json(), str)

+ 233 - 0
crawl/youtube_search-fork-master/youtube_search-fork-master/youtube_search/__init__.py

@@ -0,0 +1,233 @@
+import requests
+import urllib.parse
+import json
+from bs4 import BeautifulSoup as bs
+
class YoutubeSearch:
    """Scrape YouTube search and channel pages without the official API.

    All results are extracted from the ``window["ytInitialData"]`` JSON
    blob that YouTube embeds in its HTML pages.
    """

    def __init__(self, search_terms: str, max_results=None):
        # max_results=None means "keep everything found on the page".
        self.search_terms = search_terms
        self.max_results = max_results

        # Both searches run eagerly here: two HTTP requests per instance.
        self.videos = self.search_videos()
        self.channels = self.search_channels()

    # NOTE(review): declared without `self` or @staticmethod — it only
    # works when called on the class itself, e.g.
    # YoutubeSearch.channelInfo(channel_id), as the README shows.
    def channelInfo(id, includeVideos=True):
        """Return channel info (and optionally its videos) for a channel id.

        Returns [channel_dict] when includeVideos is False, otherwise
        [channel_dict, list_of_video_dicts].
        """
        headers = {"Accept-Language": "en-US,en;q=0.5"}
        encoded_search = urllib.parse.quote(id)
        BASE_URL = "https://youtube.com"

        # 24-char "UC..." ids use the /channel/ URL form; anything else
        # is treated as a legacy /user/ name.
        if encoded_search[0:2] == "UC" and len(encoded_search) == 24:
            url = f"{BASE_URL}/channel/{encoded_search}/videos"
            response = requests.get(url, headers=headers).text
        else:
            url = f"{BASE_URL}/user/{encoded_search}/videos"
            response = requests.get(url, headers=headers).text

        # Retry until YouTube serves a page variant containing the data
        # blob. NOTE(review): loops forever if that never happens.
        while 'window["ytInitialData"]' not in response:
            response = requests.get(url, headers=headers).text

        results = []
        # Slice the JSON object assigned to window["ytInitialData"] out
        # of the raw HTML: skip the ` = ` after the marker (+3), stop at
        # the closing `};`.
        start = (
            response.index('window["ytInitialData"]')
            + len('window["ytInitialData"]')
            + 3
        )
        end = response.index("};", start) + 1
        json_str = response[start:end]
        data = json.loads(json_str)

        if includeVideos:
            # Grid of uploads shown on the channel's /videos tab.
            videoContent = data["contents"]["twoColumnBrowseResultsRenderer"]['tabs'][1]['tabRenderer']['content'][
                'sectionListRenderer']['contents'][0]['itemSectionRenderer'][
                'contents'][0]['gridRenderer']['items']

        channelDetails = data["header"]['c4TabbedHeaderRenderer']

        # Subscriber count appears either as simpleText or as runs[]
        # depending on the page variant; fall back to "unavailable".
        try:
            sC = channelDetails['subscriberCountText']['simpleText'].split(" ")[0]
        except:
            try:
                sC = channelDetails['subscriberCountText']['runs'][0]['text'].split(" ")[0]
            except:
                sC = "unavailable"

        channel = {
            'id': id,
            'name': channelDetails['title'],
            'avatar': channelDetails['avatar']['thumbnails'][2]['url'],
            'subCount': sC
        }
        results.append(channel)

        if includeVideos:
            videos = []
            for video in videoContent:

                # Title is a simpleText or a runs[] structure.
                try:
                    title=video['gridVideoRenderer']['title']['simpleText']
                except:
                    title=video['gridVideoRenderer']['title']['runs'][0]['text']

                timeStamp = "Unavailable"
                try:
                    timeStamp = video['gridVideoRenderer']['publishedTimeText']['simpleText']
                    views = video['gridVideoRenderer']['viewCountText']['simpleText']
                except:
                    print(video['gridVideoRenderer']['thumbnailOverlays'])
                    # Scheduled premieres have no publish time or views yet.
                    if 'UPCOMING' in str(video['gridVideoRenderer']['thumbnailOverlays'][0]):
                        timeStamp = "Scheduled"
                        views = "-"
                    else:
                        timeStamp = "Unavailable"
                        views = "Unavailable"

                print(video['gridVideoRenderer'])
                vid = {
                    'id': video['gridVideoRenderer']['videoId'],
                    'videoThumb': video['gridVideoRenderer']['thumbnail']['thumbnails'][1]['url'],
                    'videoTitle': title,
                    'channelName': channelDetails['title'],
                    'channelId': id,
                    'timeStamp': timeStamp,
                    'views': views,
                    'channelUrl': "/channel/{}".format(id)
                }
                videos.append(vid)
            results.append(videos)

        return results

    def search_videos(self):
        """Run the video search and return up to max_results video dicts."""
        headers = {"Accept-Language": "en-US,en;q=0.5"}
        encoded_search = urllib.parse.quote(self.search_terms)
        BASE_URL = "https://youtube.com"
        url = f"{BASE_URL}/results?search_query={encoded_search}&lang=en"
        response = requests.get(url, headers=headers).text
        # Retry until the served page contains the embedded data blob.
        while 'window["ytInitialData"]' not in response:
            response = requests.get(url, headers=headers).text
        results = self.parse_html_videos(response)
        if self.max_results is not None and len(results) > self.max_results:
            return results[: self.max_results]
        return results

    def search_channels(self):
        """Run the channel search and return up to max_results channel dicts."""
        headers = {"Accept-Language": "en-US,en;q=0.5"}
        encoded_search = urllib.parse.quote(self.search_terms)
        BASE_URL = "https://youtube.com"
        url = f"{BASE_URL}/results?search_query={encoded_search}"
        response = requests.get(url, headers=headers).text
        # Retry until the served page contains the embedded data blob.
        while 'window["ytInitialData"]' not in response:
            response = requests.get(url, headers=headers).text
        results = self.parse_html_channels(response)
        if self.max_results is not None and len(results) > self.max_results:
            return results[: self.max_results]
        return results


    def parse_html_channels(self, response):
        """Extract channel entries from a search-results HTML page."""
        results = []
        # Same ytInitialData extraction as in channelInfo.
        start = (
            response.index('window["ytInitialData"]')
            + len('window["ytInitialData"]')
            + 3
        )
        end = response.index("};", start) + 1
        json_str = response[start:end]
        data = json.loads(json_str)

        datalist = data["contents"]["twoColumnSearchResultsRenderer"]["primaryContents"][
            "sectionListRenderer"
        ]["contents"][0]["itemSectionRenderer"]["contents"]

        for channel in datalist:
            res = {}
            try:
                if "channelRenderer" in channel.keys():
                    channel_data = channel.get("channelRenderer", {})
                    res["id"] = channel_data.get("channelId", None)
                    res["name"] = channel_data.get("title", None).get("simpleText", None)
                    # (sic) key is spelled "suscriberCountText" — callers
                    # depend on this spelling, so it is kept as-is.
                    try:
                        res["suscriberCountText"] = channel_data.get("subscriberCountText", None).get("simpleText", None).split(" ")[0]
                    except:
                        res["suscriberCountText"] = "0"
                    res["thumbnails"] = [thumb.get("url", None) for thumb in channel_data.get("thumbnail", {}).get("thumbnails", [{}]) ]
                    res["url_suffix"] = channel_data.get("navigationEndpoint", {}).get("commandMetadata", {}).get("webCommandMetadata", {}).get("url", None)
                    results.append(res)

                if "shelfRenderer" in channel.keys():
                    print("Has latest content")
            except:
                # Any malformed entry aborts parsing; return what we have.
                return results
        return results

    def parse_html_videos(self, response):
        """Extract video entries from a search-results HTML page."""
        results = []
        # Same ytInitialData extraction as in channelInfo.
        start = (
            response.index('window["ytInitialData"]')
            + len('window["ytInitialData"]')
            + 3
        )
        end = response.index("};", start) + 1
        json_str = response[start:end]
        data = json.loads(json_str)

        videos = data["contents"]["twoColumnSearchResultsRenderer"]["primaryContents"][
            "sectionListRenderer"
        ]["contents"][0]["itemSectionRenderer"]["contents"]
        for video in videos:
            res = {}

            # IF IT IS A LIVESTREAM
            if "playlistRenderer" in video.keys():
                continue

            # IF IT IS A VIDEO:
            if "videoRenderer" in video.keys():
                video_data = video.get("videoRenderer", {})

                # CHECK IF IT IS A LIVESTREAM (Support for livestreams will be added in the future)
                try:
                    if "BADGE_STYLE_TYPE_LIVE_NOW" == video_data.get("badges")[0].get('metadataBadgeRenderer').get("style"):
                        continue

                # IF IT IS NOT A LIVESTREAM, GET THE VIDEO
                # (regular videos have no "badges" key, so the indexing
                # above raises and lands here — this except branch is the
                # normal path, not an error path)
                except:
                    res["id"] = video_data.get("videoId", None)
                    res["thumbnails"] = [thumb.get("url", None) for thumb in video_data.get("thumbnail", {}).get("thumbnails", [{}]) ]
                    res["title"] = video_data.get("title", {}).get("runs", [[{}]])[0].get("text", None)
                    res["long_desc"] = video_data.get("descriptionSnippet", {}).get("runs", [{}])[0].get("text", None)
                    res["channel"] = video_data.get("longBylineText", {}).get("runs", [[{}]])[0].get("text", None)
                    res["duration"] = video_data.get("lengthText", {}).get("simpleText", 0)

                    try:
                        res["views"] = video_data.get("viewCountText", {}).get("simpleText", 0).split(" ")[0]
                    except:
                        if "LIVE" in str(video_data.get("thumbnailOverlays")):
                            res["views"] = "Livestream"
                        else:
                            res['views'] = "unavailable"
                    try:
                        res['publishedText'] = video_data.get("publishedTimeText", None).get("simpleText")
                    except:
                        if "UPCOMING" in str(video_data.get("thumbnailOverlays")):
                            res['publishedText'] = "Scheduled"
                        else:
                            res['publishedText'] = "Unavailable"

                    res["url_suffix"] = video_data.get("navigationEndpoint", {}).get("commandMetadata", {}).get("webCommandMetadata", {}).get("url", None)
                    res["channelId"] = video_data.get("longBylineText").get("runs")[0].get("navigationEndpoint").get("browseEndpoint").get("browseId")
                    results.append(res)
        return results

    def videos_to_dict(self):
        """Return the video search results as a list of dicts."""
        return self.videos

    def channels_to_dict(self):
        """Return the channel search results as a list of dicts."""
        return self.channels

    def videos_to_json(self):
        """Return the video search results as a JSON string."""
        return json.dumps({"videos": self.videos})

    def channels_to_json(self):
        """Return the channel search results as a JSON string."""
        return json.dumps({"channels": self.channels})
+

Vissa filer visades inte eftersom för många filer har ändrats