Here is your command line to download with N_m3u8DL-RE.exe:
Code:N_m3u8DL-RE.exe -M format=mp4 -sv best -sa lang=fr:for=best --save-name "Stromae : multitude le film le concert evenement" --key 5ebe19fecc035473b81746129566b598:c5bbe36cc23af8604c4366221058d26a --key 42693d356b0b5335815f03bb5e0b0d84:70d305eccb9089574d6e05d6633b70a1 --key 983b4b1bc2e056c2b39defecf2cf5278:f3a868e9c1291cfed676643b3fdbdc6d "https://vod-das.cdn-0.diff.tf1.fr/eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjaXAiOiI5MC40Ni4yMy4yMDgiLCJjbWNkIjoiIiwiZXhwIjoxNzM3MTk3NTIwLCJnaWQiOiIxOTRlMTIyNTc1OTU0YWZmYjMyOThjNmJkYWFjZTY5NiIsImlhdCI6MTczNzE4MzEyMCwiaXNzIjoiZGVsaXZlcnkiLCJtYXhiIjoyODAwMDAwLCJzdGVtIjoiLzIvVVNQLTB4MC84NC85NC8xNDIxODQ5NC9zc20vYWZlMzg3NWNhMmZiZmIzZjFhZGEzYTc3ZDhlZTVhMGQ3MmMzZDIzZWI2ZWNmMDdhZjZjMTJkZTc3NTFkMDVhNy5pc20vMTQyMTg0OTQubXBkIiwic3ViIjoiMTk0ZTEyMjU3NTk1NGFmZmIzMjk4YzZiZGFhY2U2OTYifQ.QjoI-3i1a84oxLzBdAvYpHJo5qYscTGOw_EnT5OUcAE/2/USP-0x0/84/94/14218494/ssm/afe3875ca2fbfb3f1ada3a77d8ee5a0d72c3d23eb6ecf07af6c12de7751d05a7.ism/14218494.mpd"
+ Reply to Thread
Results 31 to 41 of 41
-
-
That works. Thank you so much!
We all bleed blue from the inside.... -
If you have vinetrimmer, copy-paste this code into vinetrimmer/services/tf1.py
import base64
import json
import os
import sys
import time
import re
import urllib.parse
from hashlib import md5
from datetime import datetime, timedelta

from bs4 import BeautifulSoup
import click
import requests

from vinetrimmer.objects import AudioTrack, TextTrack, Title, Tracks, VideoTrack
from vinetrimmer.objects.tracks import MenuTrack
from vinetrimmer.services.BaseService import BaseService
from vinetrimmer.config import directories


class TF1(BaseService):
    """
    Service code for TF1+ (https://www.tf1.fr)

    \b
    Authorization: Login
    Robustness: Basic

    \b
    Tips:
    - Use the program ID as input: 1270
    """

    ALIASES = ["TF1", "tf1"]
    TITLE_RE = [
        r"^https://www\.tf1\.fr/[^/]+/(?P<name>[^/]+)(?:/saison-(?P<season>\d+))?/(?P<episode_name>[^/#?]+)(?:#.*)?$",
        r"^(?P<id>[a-f0-9-]+)$"
    ]
    AUDIO_CODEC_MAP = {
        "AAC": "mp4a",
        "AC3": "ac-3",
        "EC3": "ec-3"
    }
    BASE_URL = "https://mediainfo.tf1.fr/mediainfocombo/"
    PATTERN_API_KEY = re.compile(r'\"eu1.gigya.com\",key:\"(.*?)\"')
    PATTERN_JS_ID = re.compile(r'main-(.*?)\.bundle\.js')
    AUTH_HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'referer': 'https://prod-player.tf1.fr/'
    }

    @staticmethod
    @click.command(name="TF1", short_help="https://www.tf1.fr/", help=__doc__)
    @click.argument("title", type=str, required=False)
    @click.option("-m", "--movie", is_flag=True, default=False, help="Title is a movie.")
    @click.option("-d", "--divertissement", is_flag=True, default=False, help="Title is a entertainment show.")
    @click.pass_context
    def cli(ctx, **kwargs):
        return TF1(ctx, **kwargs)

    def __init__(self, ctx, title, movie, divertissement):
        """
        Resolve the given title URL into either a single movie program ID or a
        list of episode page URLs, authenticating against TF1 first.

        :param ctx: click context (provides profile and parent dl params).
        :param title: TF1+ page URL (series page or movie page).
        :param movie: treat the title as a movie.
        :param divertissement: treat the title as an entertainment show
                               (sequential episode numbering).
        """
        super().__init__(ctx)
        self.parse_result = self.parse_title(ctx, title)
        self.index = 0
        self.movie = movie
        self.divertissement = divertissement
        self.token = None
        self.token_expiry = 0
        self.profile = ctx.obj.profile
        self.vcodec = ctx.parent.params["vcodec"].lower()
        self.acodec = ctx.parent.params["acodec"]
        self.range = ctx.parent.params["range_"]
        self.wanted = ctx.parent.params["wanted"]
        self.authenticate()
        if movie:
            self.log.debug(f"Processing movie URL: {title}")
            response = requests.get(title)
            self.log.debug(f"Response content: {response.text[:200]}...")
            # The movie page embeds its video ID in one of several markup
            # variants; try each pattern until one matches.
            patterns = [
                r'"Video:([a-f0-9-]+)"',
                r'data-video="([a-f0-9-]+)"',
                r'video/([a-f0-9-]+)',
                r'VideoId":"([a-f0-9-]+)"'
            ]
            for pattern in patterns:
                match = re.search(pattern, response.text)
                if match:
                    self.program_id = match.group(1)
                    self.log.debug(f"Found program ID: {self.program_id} using pattern: {pattern}")
                    break
            if hasattr(self, 'program_id'):
                params = "?pver=5029001&context=MYTF1&topDomain=www.tf1.fr&platform=web&device=desktop&os=windows&osVersion=10.0&playerVersion=5.29.1&productName=mytf1&productVersion=3.38.0&browser=chrome&browserVersion=131"
                self.base_url = f"{self.BASE_URL}{self.program_id}{params}"
                self.base_url_video = f"{self.BASE_URL}{self.program_id}{params}"
            else:
                raise ValueError("Could not extract program ID from URL")
        else:
            self.series_url = title
            # Normalize the series URL to its /videos listing; when a specific
            # season was requested (-w SxE), go straight to that season page.
            if "/videos" not in self.series_url:
                if self.wanted and isinstance(self.wanted, list) and len(self.wanted) > 0 and not self.divertissement:
                    wanted_season = self.wanted[0].split('x')[0]
                    self.series_url = f"{self.series_url}/videos/saison-{wanted_season}"
                else:
                    self.series_url = f"{self.series_url}/videos"
            self.episodes = self.get_episode_links(self.series_url)
            if not self.episodes:
                raise ValueError("No episodes found for the given series URL")

    def get_api_key(self):
        """
        Extract the Gigya API key from the profile's Netscape cookie file.

        The key is the name of the ``gig_bootstrap_<apiKey>`` cookie whose
        value is ``compte_ver4``, with the prefix stripped.

        :raises ValueError: if no matching bootstrap cookie is present.
        """
        cookie_file = os.path.join(directories.cookies, self.__class__.__name__, f"{self.profile}.txt")
        with open(cookie_file, "r") as file:
            cookie_file_txt = file.read()
        cookies_lines = cookie_file_txt.strip().split('\n')
        cookies_dict = {}
        for line in cookies_lines:
            # Netscape cookie format: 7 tab-separated fields; name is field 6,
            # value is field 7.
            parts = line.split('\t')
            if len(parts) >= 7:
                cookie_name = parts[5]
                cookie_value = parts[6]
                cookies_dict[cookie_name] = cookie_value
        target_value = "compte_ver4"
        for key, value in cookies_dict.items():
            if value == target_value:
                # Fix: stop at the first match and fail loudly when none is
                # found (previously an unset self.api_key raised AttributeError).
                self.api_key = key.replace('gig_bootstrap_', '')
                return self.api_key
        raise ValueError("Could not find a gig_bootstrap_* cookie with value 'compte_ver4' in the cookie file")

    def login(self, username, password):
        """
        Log in against the Gigya accounts endpoint.

        :returns: the parsed JSON response (contains UID/UIDSignature on success).
        """
        api_key = self.get_api_key()
        payload = {
            "loginID": username,
            "password": password,
            "apiKey": api_key,
            "format": "json"
        }
        response = requests.post("https://compte.tf1.fr/accounts.login", data=payload, headers=self.AUTH_HEADERS)
        return response.json()

    def authenticate(self):
        """
        Obtain (or refresh) the short-lived JWT used for media/license calls.

        The token is refreshed when absent or expired; on success the session's
        Authorization header is updated.
        """
        if self.token is None or self.token_expiry < datetime.now().timestamp():
            # self.credentials is provided by BaseService.
            login_response = self.login(self.credentials.username, self.credentials.password)
            if "UID" not in login_response:
                print("Login failed:", login_response)
                return
            headers_login = {
                'UID': login_response['UID'],
                'signature': login_response['UIDSignature'],
                'timestamp': int(login_response['signatureTimestamp']),
                'consent_ids': ["1", "2", "3", "4", "10001", "10003", "10005", "10007", "10013", "10015", "10017", "10019", "10009", "10011", "13002", "13001", "10004", "10014", "10016", "10018", "10020", "10010", "10012", "10006", "10008"]
            }
            responseJWT = requests.post("https://www.tf1.fr/token/gigya/web", json=headers_login)
            jwt_token = responseJWT.json().get('token') if responseJWT.status_code == 200 else None
            if jwt_token:
                self.uid = login_response['UID']
                self.token = jwt_token
                # The JWT is short-lived; treat it as valid for 4 minutes.
                self.token_expiry = (datetime.now() + timedelta(minutes=4)).timestamp()
                self.session.headers.update({'Authorization': f'Bearer {self.token}'})
            else:
                print("Failed to retrieve JWT:", responseJWT.json())

    def get_episode_links(self, series_url):
        """Extract episode links from TF1+ series page in the exact order they appear"""
        response = requests.get(series_url)
        soup = BeautifulSoup(response.text, 'html.parser')
        episodes = []
        # Primary strategy: episode title anchors, in document order.
        all_episodes = soup.find_all('a', {'class': 'text-primary headline-6 line-clamp-2'})
        for link in all_episodes:
            url = link.get('href')
            if url:
                full_url = f"https://www.tf1.fr{url}" if not url.startswith('http') else url
                if full_url not in episodes:  # avoid duplicates while keeping order
                    episodes.append(full_url)
        if not episodes:
            # Fallback: scan the grid container when the primary selector fails.
            grid = soup.find('ul', attrs={'data-testid': 'Grid'})
            if grid:
                links = grid.find_all('a', href=True)
                for link in links:
                    url = link['href']
                    full_url = f"https://www.tf1.fr{url}" if not url.startswith('http') else url
                    if full_url not in episodes and '/videos/' in url:
                        episodes.append(full_url)
        return episodes

    def _extract_episode_number(self, title):
        """Helper method to extract episode number from title"""
        match = re.search(r'[Ee]pisode[s]?\s*(\d+)', title, re.IGNORECASE)
        if match:
            return int(match.group(1))
        return self.index + 1

    def get_titles(self):
        """
        Build Title objects for the movie or for every wanted episode.

        For series, each episode page is fetched to recover its program ID,
        then the mediainfo API supplies the metadata used for naming and
        season/episode numbering.
        """
        titles = []
        # Movie handling: single title from the mediainfo endpoint.
        if self.movie:
            program = requests.get(self.base_url, headers=self.session.headers).json()
            video_metadata = program
            if video_metadata.get('media'):
                titles.append(Title(
                    id_=video_metadata['media']['id'],
                    type_=Title.Types.MOVIE,
                    name=video_metadata['media']['title'].replace(' ', ' '),
                    original_lang='fr',
                    source=self.ALIASES[0],
                    service_data={
                        'video_data': video_metadata['media'],
                        'mpd_url': video_metadata['delivery'].get('url'),
                        'base_url': self.base_url,
                        'program_id': self.program_id
                    }
                ))
            return titles
        episode_count = 1
        for episode_url in self.episodes:
            try:
                response = requests.get(episode_url)
                match = re.search(r'"Video:([a-f0-9-]+)"', response.text)
                if not match:
                    self.log.warning(f"Could not find program ID for episode: {episode_url}")
                    continue
                program_id = match.group(1)
                params = "?pver=5029001&context=MYTF1&topDomain=www.tf1.fr&platform=web&device=desktop&os=windows&osVersion=10.0&playerVersion=5.29.1&productName=mytf1&productVersion=3.38.0&browser=chrome&browserVersion=131"
                episode_metadata_url = f"{self.BASE_URL}{program_id}{params}"
                episode_metadata = requests.get(episode_metadata_url, headers=self.session.headers).json()
                if not episode_metadata.get('media'):
                    self.log.warning(f"No media data found for episode: {episode_url}")
                    continue
                media_title = episode_metadata['media']['title']
                show_name = episode_metadata['media'].get('programName', '').replace(' ', ' ')
                if self.divertissement:
                    # Entertainment mode: simple sequential numbering.
                    season_num = 1
                    episode_num = episode_count
                    episode_name = media_title
                    episode_count += 1
                else:
                    # Normal mode: parse standard "Show - SXX EYY - Name" titles.
                    pattern = r"^.*? - S(\d{2}) E(\d{2}) - (.+)$"
                    match = re.search(pattern, media_title)
                    if match:
                        season_num = int(match.group(1))
                        episode_num = int(match.group(2))
                        episode_name = match.group(3)
                    else:
                        # Looser "SxEy" fallback, else default to S1 / position.
                        season_match = re.search(r'S(\d+)\s*E(\d+)', media_title, re.IGNORECASE)
                        if season_match:
                            season_num = int(season_match.group(1))
                            episode_num = int(season_match.group(2))
                        else:
                            season_num = 1
                            episode_num = len(titles) + 1
                        episode_name = media_title
                title = Title(
                    id_=episode_metadata['media']['id'],
                    type_=Title.Types.TV,
                    name=show_name,
                    season=season_num,
                    episode=episode_num,
                    episode_name=episode_name,
                    original_lang='fr',
                    source=self.ALIASES[0],
                    service_data={
                        'video_data': episode_metadata['media'],
                        'mpd_url': episode_metadata['delivery'].get('url'),
                        'base_url': episode_metadata_url,
                        'program_id': program_id
                    }
                )
                if title.is_wanted(self.wanted):
                    titles.append(title)
            except Exception as e:
                self.log.error(f"Error processing episode {episode_url}: {str(e)}")
                continue
        return titles

    def get_tracks(self, title):
        """
        Get the tracks from the title.
        Processes MPD manifest to identify descriptive audio and SDH subtitles.
        """
        self.authenticate()
        video_metadata = self.session.get(title.service_data['base_url'], headers=self.session.headers).json()
        mpd_url = video_metadata['delivery']['url']
        tracks = Tracks.from_mpd(
            url=mpd_url,
            session=self.session,
            source=self.ALIASES[0]
        )
        DASH_NS = "{urn:mpeg:dash:schema:mpd:2011}"
        TVA_NS = "{urn:tva:metadata:cs:AudioPurposeCS:2007}"
        # Re-add audio tracks, flagging audio-description (AD) tracks based on
        # the AdaptationSet's Label / Accessibility / Role elements.
        audio_tracks = [t for t in tracks.audios]
        tracks.audios.clear()
        for track in audio_tracks:
            # track.extra is assumed to be (representation, adaptation_set)
            # as produced by Tracks.from_mpd — TODO confirm.
            adaptation_set = track.extra[1] if isinstance(track.extra, tuple) and len(track.extra) > 1 else None
            if adaptation_set is not None:
                labels = adaptation_set.findall(f"{DASH_NS}Label") or adaptation_set.findall("Label")
                accessibility = adaptation_set.findall(f"{DASH_NS}Accessibility") or adaptation_set.findall("Accessibility")
                roles = adaptation_set.findall(f"{DASH_NS}Role") or adaptation_set.findall("Role")
                is_ad = False
                for label in labels:
                    if label.text and "audiodescription" in label.text.lower():
                        is_ad = True
                        break
                if not is_ad:
                    for acc in accessibility:
                        if (acc.get('value') == '1' or acc.get('schemeIdUri') == TVA_NS[1:-1] and acc.get('value') == '1'):
                            is_ad = True
                            break
                if not is_ad:
                    for role in roles:
                        if role.get('value') == 'alternate':
                            is_ad = True
                            break
                if is_ad:
                    track.descriptive = True
            tracks.add(track)
        # Re-add only the last two subtitle tracks, flagging SDH via the
        # Role value "caption" on VTT tracks.
        text_tracks = [t for t in tracks.subtitles]
        tracks.subtitles.clear()
        for track in text_tracks[-2:]:
            if track.codec == "vtt":
                adaptation_set = track.extra[1] if isinstance(track.extra, tuple) and len(track.extra) > 1 else None
                if adaptation_set is not None:
                    roles = adaptation_set.findall(f"{DASH_NS}Role") or adaptation_set.findall("Role")
                    for role in roles:
                        if role.get('value') == 'caption':
                            track.sdh = True
                            break
            tracks.add(track)
        return tracks

    def get_chapters(self, title):
        # No chapters in the MPD, skip.
        self.authenticate()
        return []

    def certificate(self, **kwargs):
        # No service certificate needed.
        return None

    def license(self, challenge, track, **_):
        """POST the Widevine challenge to TF1's license proxy and return the raw response."""
        self.authenticate()
        self.session.headers.update({
            'x-dt-auth-token': self.token,
        })
        license_response = self.session.post("https://widevine-proxy-m.prod.p.tf1.fr/proxy", data=challenge).content
        return license_response
-
Commands to use:
Series : poetry run vt dl -al fr -sl fr -q 720 tf1 https://www.tf1.fr/tf1/kallys-mashup-la-voix-de-la-pop-835
Movies : poetry run vt dl -al fr -ad -sl fr -q 720 tf1 -m https://www.tf1.fr/tf1/celibataire-ou-presque-that-awkward-moment -
I can't access the webpage because of geo restriction.
There is a concert on YouTube. Is this the one?
https://www.youtube.com/watch?v=l5WgAr4B8Vo -
We all bleed blue from the inside....
-
Hello 2nHxWW6GkN1l916N3ayz8HQoi, TF1 changed something in the last two weeks and the script is not working anymore.
I've changed
def get_tf1_info():
    """Scrape the TF1 page for the API key, consent IDs and player version."""
    page = requests.get(TF1_URL).content.decode()
    key = re.findall(r'"apiKey":"([^"]+)"', page)[0]
    raw_ids = re.findall(r'neededConsentIds":\[(.*?)]', page)[0]
    ids = raw_ids.replace("\"", "").split(",")
    raw_version = re.findall(rf'"playerEndpoint":"{TF1_PLAYER}/","version":"([^"]+)"', page)[0]
    return key, ids, format_version(raw_version)
def get_tf1_info():
    """Scrape the TF1 page (escaped-JSON variant) for API key, consent IDs and player version."""
    page = requests.get(TF1_URL).content.decode()
    key = re.findall(r'"apiKey\\":\\"([^"]+)\\"', page)[0]
    raw_ids = re.findall(r'neededConsentIds\\":\[(.*?)]', page)[0]
    ids = raw_ids.replace("\"", "").split(",")
    raw_version = re.findall(rf'"playerEndpoint\\":\\"{TF1_PLAYER}/\\",\\"version\\":\\"([^"]+)\\"', page)[0]
    return key, ids, format_version(raw_version)
Code:keys = get_keys(pssh_value) File "gomytf.py", line 86, in get_video_data pssh_value = str(min( KeyError: 'url'
def get_video_data(source_url):
    """Resolve a source page URL to its MPD manifest URL and the shortest PSSH box."""
    page = requests.get(source_url).content.decode()
    embed_urls = re.findall(r'"embedUrl":"([^"]+)"', page)
    player_url = [u for u in embed_urls if "/player/" in u][0]
    media_id = re.findall(r'/player/([^/]+)', player_url)[0]
    media_response = requests.get(
        TF1_MEDIA.format(media_id=media_id),
        params={'pver': PLAYER_VERSION, 'context': 'context'},
        headers={'authorization': f'Bearer {BEARER_TOKEN}'}
    )
    body = media_response.content.decode()
    if "GEOBLOCKED" in body:
        print("VPN FAILURE")
        exit(0)
    manifest = json.loads(body)["delivery"]["url"]
    manifest_text = requests.get(manifest).content.decode()
    pssh_boxes = re.findall(r'<cenc:pssh>(.+?)</cenc:pssh>', manifest_text)
    pssh_value = str(min(pssh_boxes, key=len))
    return manifest, pssh_value
-
Hello. As I said in the comment you quoted at the end, the script is outdated. The working version is included in the tool in the sticky threads and in my signature. If you have issues with that version, leave a comment in the support thread. I checked, and TF1 works with that one.
--[----->+<]>.++++++++++++.---.--------.
[*drm mass downloader: widefrog*]~~~~~~~~~~~[*how to make your own mass downloader: guide*]
Similar Threads
-
downloading a drm protected video
By thitom in forum Video Streaming DownloadingReplies: 21Last Post: 23rd Mar 2025, 09:30 -
Help downloading DRM-protected video
By nyvvo6430 in forum Video Streaming DownloadingReplies: 14Last Post: 5th Jul 2023, 07:53 -
Willing to pay for Help downloading DRM protected video within a player
By shruru in forum Video Streaming DownloadingReplies: 0Last Post: 29th Apr 2023, 12:18 -
Downloading DRM protected video from LOOKE's platform
By Blue Boy in forum Video Streaming DownloadingReplies: 1Last Post: 24th Apr 2021, 16:29 -
help downloading DRM protected video
By Lionking in forum Video Streaming DownloadingReplies: 16Last Post: 5th Feb 2021, 23:53