diff --git a/Link_Pass.ipynb b/Link_Pass.ipynb new file mode 100644 index 0000000..e456165 --- /dev/null +++ b/Link_Pass.ipynb @@ -0,0 +1,1528 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [], + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter AdFly Link Below!**" + ], + "metadata": { + "id": "rWggyWOjc2vK" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter AdFly Link Below
\n", + "!pip install cloudscraper\n", + "import cloudscraper\n", + "import re\n", + "from base64 import b64decode\n", + "from urllib.parse import unquote\n", + "\n", + "# ==========================================\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "'''\n", + "404: Complete exception handling not found :(\n", + "'''\n", + "# ==========================================\n", + "\n", + "def decrypt_url(code):\n", + " a, b = '', ''\n", + " for i in range(0, len(code)):\n", + " if i % 2 == 0: a += code[i]\n", + " else: b = code[i] + b\n", + " key = list(a + b)\n", + " i = 0\n", + " while i < len(key):\n", + " if key[i].isdigit():\n", + " for j in range(i+1,len(key)):\n", + " if key[j].isdigit():\n", + " u = int(key[i]) ^ int(key[j])\n", + " if u < 10: key[i] = str(u)\n", + " i = j\t\t\t\t\t\n", + " break\n", + " i+=1\n", + " key = ''.join(key)\n", + " decrypted = b64decode(key)[16:-16]\n", + " return decrypted.decode('utf-8')\n", + "\n", + "# ==========================================\n", + "\n", + "def adfly(url):\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " res = client.get(url).text\n", + " out = {'error': False, 'src_url': url}\n", + " try:\n", + " ysmm = re.findall(\"ysmm\\s+=\\s+['|\\\"](.*?)['|\\\"]\", res)[0]\n", + " except:\n", + " out['error'] = True\n", + " return out\n", + " url = decrypt_url(ysmm)\n", + " if re.search(r'go\\.php\\?u\\=', url):\n", + " url = b64decode(re.sub(r'(.*?)u=', '', url)).decode()\n", + " elif '&dest=' in url:\n", + " url = unquote(re.sub(r'(.*?)dest=', '', url))\n", + " out['bypassed_url'] = url\n", + " return out\n", + "\n", + "# ==========================================\n", + "\n", + "res = adfly(url)\n", + "\n", + "print(res)" + ], + "metadata": { + "id": "NRE5wW9wc85c", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter GPLinks Link Below!**" + ], + "metadata": { + "id": "vIuY_F5gVVm2" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter GPLinks Link Below
\n", + "print(\"Downloading Cloud-Scraper...\")\n", + "!pip install cloudscraper\n", + "print(\"Setting Up!\")\n", + "print(\"Performing Check...\")\n", + "import time\n", + "import cloudscraper\n", + "from bs4 import BeautifulSoup\n", + "from urllib.parse import urlparse\n", + "print(\"Everything Looks Good! Lets Continue.\")\n", + "\n", + "url = \"\" #@param {type:\"string\"} \n", + "print(\"Entered Link:\")\n", + "print(url)\n", + "print(\"Checking Link...\")\n", + "print(\"Checking Done!\")\n", + "print(\"Bypassing Link...\")\n", + "# ==============================================\n", + "\n", + "def gplinks(url: str):\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " p = urlparse(url)\n", + " final_url = f\"{p.scheme}://{p.netloc}/links/go\"\n", + " res = client.head(url)\n", + " header_loc = res.headers[\"location\"]\n", + " param = header_loc.split(\"postid=\")[-1]\n", + " req_url = f\"{p.scheme}://{p.netloc}/{param}\"\n", + " p = urlparse(header_loc)\n", + " ref_url = f\"{p.scheme}://{p.netloc}/\"\n", + " h = {\"referer\": ref_url}\n", + " res = client.get(req_url, headers=h, allow_redirects=False)\n", + " bs4 = BeautifulSoup(res.content, \"html.parser\")\n", + " inputs = bs4.find_all(\"input\")\n", + " time.sleep(10) # !important\n", + " data = { input.get(\"name\"): input.get(\"value\") for input in inputs }\n", + " h = {\n", + " \"content-type\": \"application/x-www-form-urlencoded\",\n", + " \"x-requested-with\": \"XMLHttpRequest\"\n", + " }\n", + " time.sleep(10)\n", + " res = client.post(final_url, headers=h, data=data)\n", + " try:\n", + " return res.json()[\"url\"].replace(\"/\",\"/\")\n", + " except: \n", + " return \"Could not Bypass your URL :(\"\n", + "\n", + "# ==============================================\n", + "\n", + "res = gplinks(url)\n", + "\n", + "print(res)\n", + "print(\"Successfully Bypassed!\")" + ], + "metadata": { + "id": "PHCG2iGQgDhz", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter GDTot Link as well as your GDTot Crypt! If you don't know how to get Crypt then Learn Here**" + ], + "metadata": { + "id": "cyBiaGkAUtLi" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter GDTot-Link Below
\n", + "!pip install cloudscraper\n", + "import cloudscraper\n", + "import re\n", + "import base64\n", + "from urllib.parse import urlparse, parse_qs\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "GDTot_Crypt = \"b0lDek5LSCt6ZjVRR2EwZnY4T1EvVndqeDRtbCtTWmMwcGNuKy8wYWpDaz0%3D\" #@param {type:\"string\"}\n", + "# ==========================================\n", + "\n", + "def gdtot(url):\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " match = re.findall(r\"https?://(.+)\\.gdtot\\.(.+)\\/\\S+\\/\\S+\", url)[0]\n", + " client.cookies.update({ \"crypt\": GDTot_Crypt })\n", + " res = client.get(url)\n", + " res = client.get(f\"https://{match[0]}.gdtot.{match[1]}/dld?id={url.split('/')[-1]}\")\n", + " url = re.findall(r'URL=(.*?)\"', res.text)[0]\n", + " info = {}\n", + " info[\"error\"] = False\n", + " params = parse_qs(urlparse(url).query)\n", + " if \"gd\" not in params or not params[\"gd\"] or params[\"gd\"][0] == \"false\":\n", + " info[\"error\"] = True\n", + " if \"msgx\" in params:\n", + " info[\"message\"] = params[\"msgx\"][0]\n", + " else:\n", + " info[\"message\"] = \"Invalid link\"\n", + " else:\n", + " decoded_id = base64.b64decode(str(params[\"gd\"][0])).decode(\"utf-8\")\n", + " drive_link = f\"https://drive.google.com/open?id={decoded_id}\"\n", + " info[\"gdrive_link\"] = drive_link\n", + " if not info[\"error\"]:\n", + " return info[\"gdrive_link\"]\n", + " else:\n", + " return \"Could not generate GDrive URL for your GDTot Link :(\"\n", + "\n", + "# ==========================================\n", + "\n", + "res = gdtot(url)\n", + "\n", + "print(res)" + ], + "metadata": { + "id": "pJCdd8LESBAk", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter Sharer.pw Link, XSRF_TOKEN and laravel_session cookies! If you don't know how to get then then watch this Video (for GDTOT) and do the same for Sharer.pw**" + ], + "metadata": { + "id": "JlOUDYIzlLTD" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter Sharer.pw Link Below
\n", + "!pip install cloudscraper\n", + "import cloudscraper\n", + "import re\n", + "from lxml import etree\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "XSRF_TOKEN = \"\" #@param {type:\"string\"}\n", + "Laravel_Session = \"\" #@param {type:\"string\"}\n", + "'''\n", + "404: Exception Handling Not Found :(\n", + "NOTE:\n", + "DO NOT use the logout button on website. Instead, clear the site cookies manually to log out.\n", + "If you use logout from website, cookies will become invalid.\n", + "'''\n", + "\n", + "# ===================================================================\n", + "\n", + "def parse_info(res):\n", + " f = re.findall(\">(.*?)<\\/td>\", res.text)\n", + " info_parsed = {}\n", + " for i in range(0, len(f), 3):\n", + " info_parsed[f[i].lower().replace(' ', '_')] = f[i+2]\n", + " return info_parsed\n", + "\n", + "def sharer_pw(url, forced_login=False):\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " client.cookies.update({\n", + " \"XSRF-TOKEN\": XSRF_TOKEN,\n", + " \"laravel_session\": Laravel_Session\n", + " })\n", + " res = client.get(url)\n", + " token = re.findall(\"_token\\s=\\s'(.*?)'\", res.text, re.DOTALL)[0]\n", + " ddl_btn = etree.HTML(res.content).xpath(\"//button[@id='btndirect']\")\n", + " info_parsed = parse_info(res)\n", + " info_parsed['error'] = True\n", + " info_parsed['src_url'] = url\n", + " info_parsed['link_type'] = 'login'\n", + " info_parsed['forced_login'] = forced_login\n", + " headers = {\n", + " 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',\n", + " 'x-requested-with': 'XMLHttpRequest'\n", + " }\n", + " data = {\n", + " '_token': token\n", + " }\n", + " if len(ddl_btn):\n", + " info_parsed['link_type'] = 'direct'\n", + " if not forced_login:\n", + " data['nl'] = 1\n", + " try: \n", + " res = client.post(url+'/dl', headers=headers, data=data).json()\n", + " except:\n", + " return info_parsed\n", + " if 'url' in res and res['url']:\n", + " info_parsed['error'] = False\n", + " info_parsed['gdrive_link'] = res['url']\n", + " if len(ddl_btn) and not forced_login and not 'url' in info_parsed:\n", + " # retry download via login\n", + " return sharer_pw(url, forced_login=True)\n", + " return info_parsed\n", + "\n", + "# ===================================================================\n", + "\n", + "res = sharer_pw(url)\n", + "\n", + "print(res)" + ], + "metadata": { + "id": "jY_RrpdYiTqj", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter DropLink Below!**" + ], + "metadata": { + "id": "2PlnCgEllyT7" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter Drop Link Below
\n", + "!pip install cloudscraper\n", + "import cloudscraper\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "# ==============================================\n", + "\n", + "def droplink(url):\n", + " api = \"https://api.emilyx.in/api\"\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " resp = client.get(url)\n", + " if resp.status_code == 404:\n", + " return \"File not found/The link you entered is wrong!\"\n", + " try:\n", + " resp = client.post(api, json={\"type\": \"droplink\", \"url\": url})\n", + " res = resp.json()\n", + " except BaseException:\n", + " return \"API UnResponsive / Invalid Link !\"\n", + " if res[\"success\"] is True:\n", + " return res[\"url\"]\n", + " else:\n", + " return res[\"msg\"]\n", + "\n", + "# ==============================================\n", + "\n", + "res = droplink(url)\n", + "\n", + "print(res)" + ], + "metadata": { + "id": "611_HcrXfOOr", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter AppDrive or DriveApp etc. Look-Alike Link and as well as the Account Details (Required for Login Required Links only)**" + ], + "metadata": { + "id": "WlRAIhcUoVHb" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter App Drive or Drive App Link Below
\n", + "print(\"Downloading Cloud-Scraper...\")\n", + "!pip install cloudscraper\n", + "import cloudscraper\n", + "import re\n", + "import requests\n", + "from lxml import etree\n", + "from urllib.parse import urlparse\n", + "\n", + "\n", + "# Website User Account (NOT GOOGLE ACCOUNT) ----\n", + "url = \"\" #@param {type:\"string\"}\n", + "Email = \"OPTIONAL\" #@param {type:\"string\"}\n", + "Password = \"OPTIONAL\" #@param {type:\"string\"}\n", + "\n", + "'''\n", + "NOTE: \n", + " - Auto-detection for non-login urls, and indicated via 'link_type' (direct/login) in output.\n", + "SUPPORTED DOMAINS:\n", + " - appdrive.in\n", + " - driveapp.in\n", + " - drivehub.in\n", + " - gdflix.pro\n", + " - drivesharer.in\n", + " - drivebit.in\n", + " - drivelinks.in\n", + " - driveace.in\n", + " - drivepro.in\n", + " \n", + "'''\n", + "print(\"Generating GDrive Link...\")\n", + "\n", + "# ===================================================================\n", + "\n", + "def unified(url):\n", + " try:\n", + " account = {\"email\": Email, \"passwd\": Password}\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " client.headers.update(\n", + " {\n", + " \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36\"\n", + " }\n", + " )\n", + " data = {\"email\": account[\"email\"], \"password\": account[\"passwd\"]}\n", + " client.post(f\"https://{urlparse(url).netloc}/login\", data=data)\n", + " res = client.get(url)\n", + " key = re.findall('\"key\",\\s+\"(.*?)\"', res.text)[0]\n", + " ddl_btn = etree.HTML(res.content).xpath(\"//button[@id='drc']\")\n", + " info = re.findall(\">(.*?)<\\/li>\", res.text)\n", + " info_parsed = {}\n", + " for item in info:\n", + " kv = [s.strip() for s in item.split(\":\", maxsplit=1)]\n", + " info_parsed[kv[0].lower()] = kv[1]\n", + " info_parsed = info_parsed\n", + " info_parsed[\"error\"] = False\n", + " info_parsed[\"link_type\"] = \"login\"\n", + " headers = {\n", + " \"Content-Type\": f\"multipart/form-data; boundary={'-'*4}_\",\n", + " }\n", + " data = {\"type\": 1, \"key\": key, \"action\": \"original\"}\n", + " if len(ddl_btn):\n", + " info_parsed[\"link_type\"] = \"direct\"\n", + " data[\"action\"] = \"direct\"\n", + " while data[\"type\"] <= 3:\n", + " boundary = f'{\"-\"*6}_'\n", + " data_string = \"\"\n", + " for item in data:\n", + " data_string += f\"{boundary}\\r\\n\"\n", + " data_string += f'Content-Disposition: form-data; name=\"{item}\"\\r\\n\\r\\n{data[item]}\\r\\n'\n", + " data_string += f\"{boundary}--\\r\\n\"\n", + " gen_payload = data_string\n", + " try:\n", + " response = client.post(url, data=gen_payload, headers=headers).json()\n", + " break\n", + " except BaseException:\n", + " data[\"type\"] += 1\n", + " if \"url\" in response:\n", + " info_parsed[\"gdrive_link\"] = response[\"url\"]\n", + " elif \"error\" in response and response[\"error\"]:\n", + " info_parsed[\"error\"] = True\n", + " info_parsed[\"error_message\"] = response[\"message\"]\n", + " else:\n", + " info_parsed[\"error\"] = True\n", + " info_parsed[\"error_message\"] = \"Something went wrong :(\"\n", + " if info_parsed[\"error\"]:\n", + " return info_parsed\n", + " if urlparse(url).netloc == \"driveapp.in\" and not info_parsed[\"error\"]:\n", + " res = client.get(info_parsed[\"gdrive_link\"])\n", + " drive_link = etree.HTML(res.content).xpath(\n", + " \"//a[contains(@class,'btn')]/@href\"\n", + " )[0]\n", + " info_parsed[\"gdrive_link\"] = drive_link\n", + " info_parsed[\"src_url\"] = 
url\n", + " if urlparse(url).netloc == \"drivehub.in\" and not info_parsed[\"error\"]:\n", + " res = client.get(info_parsed[\"gdrive_link\"])\n", + " drive_link = etree.HTML(res.content).xpath(\n", + " \"//a[contains(@class,'btn')]/@href\"\n", + " )[0]\n", + " info_parsed[\"gdrive_link\"] = drive_link\n", + " if urlparse(url).netloc == \"gdflix.pro\" and not info_parsed[\"error\"]:\n", + " res = client.get(info_parsed[\"gdrive_link\"])\n", + " drive_link = etree.HTML(res.content).xpath(\n", + " \"//a[contains(@class,'btn')]/@href\"\n", + " )[0]\n", + " info_parsed[\"gdrive_link\"] = drive_link\n", + "\n", + " if urlparse(url).netloc == \"drivesharer.in\" and not info_parsed[\"error\"]:\n", + " res = client.get(info_parsed[\"gdrive_link\"])\n", + " drive_link = etree.HTML(res.content).xpath(\n", + " \"//a[contains(@class,'btn')]/@href\"\n", + " )[0]\n", + " info_parsed[\"gdrive_link\"] = drive_link\n", + " if urlparse(url).netloc == \"drivebit.in\" and not info_parsed[\"error\"]:\n", + " res = client.get(info_parsed[\"gdrive_link\"])\n", + " drive_link = etree.HTML(res.content).xpath(\n", + " \"//a[contains(@class,'btn')]/@href\"\n", + " )[0]\n", + " info_parsed[\"gdrive_link\"] = drive_link\n", + " if urlparse(url).netloc == \"drivelinks.in\" and not info_parsed[\"error\"]:\n", + " res = client.get(info_parsed[\"gdrive_link\"])\n", + " drive_link = etree.HTML(res.content).xpath(\n", + " \"//a[contains(@class,'btn')]/@href\"\n", + " )[0]\n", + " info_parsed[\"gdrive_link\"] = drive_link\n", + " if urlparse(url).netloc == \"driveace.in\" and not info_parsed[\"error\"]:\n", + " res = client.get(info_parsed[\"gdrive_link\"])\n", + " drive_link = etree.HTML(res.content).xpath(\n", + " \"//a[contains(@class,'btn')]/@href\"\n", + " )[0]\n", + " info_parsed[\"gdrive_link\"] = drive_link\n", + " if urlparse(url).netloc == \"drivepro.in\" and not info_parsed[\"error\"]:\n", + " res = client.get(info_parsed[\"gdrive_link\"])\n", + " drive_link = etree.HTML(res.content).xpath(\n", + " \"//a[contains(@class,'btn')]/@href\"\n", + " )[0]\n", + " info_parsed[\"gdrive_link\"] = drive_link\n", + " if info_parsed[\"error\"]:\n", + " return \"Faced an Unknown Error!\"\n", + " return info_parsed[\"gdrive_link\"]\n", + " except BaseException:\n", + " return \"Unable to Extract GDrive Link\"\n", + "\n", + "# ===================================================================\n", + "\n", + "res = unified(url)\n", + "\n", + "print(res)" + ], + "metadata": { + "id": "0vqE8a8dm5T4", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter Linkvertise Link Below!**" + ], + "metadata": { + "id": "OY1CtzT0pA8u" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter Linkvertise-Link Below
\n", + "print(\"Downloading Cloud-Scraper...\")\n", + "!pip install cloudscraper\n", + "print(\"Importing Files!\")\n", + "import cloudscraper\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "print(\"You have Entered:\")\n", + "print(\"URL:\")\n", + "print(url)\n", + "print(\"Bypassing the Link...\")\n", + "# -------------------------------------------\n", + "\n", + "def linkvertise(url):\n", + " api = \"https://api.emilyx.in/api\"\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " resp = client.get(url)\n", + " if resp.status_code == 404:\n", + " return \"File not found/The link you entered is wrong!\"\n", + " try:\n", + " resp = client.post(api, json={\"type\": \"linkvertise\", \"url\": url})\n", + " res = resp.json()\n", + " except BaseException:\n", + " return \"API UnResponsive / Invalid Link !\"\n", + " if res[\"success\"] is True:\n", + " return res[\"url\"]\n", + " else:\n", + " return res[\"msg\"]\n", + "\n", + "# -------------------------------------------\n", + "\n", + "res = linkvertise(url)\n", + "\n", + "print(res)\n", + "print(\"Successfully Bypassed!\")" + ], + "metadata": { + "id": "4yV_DpjXpBXj", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter MDisk Link Below!**" + ], + "metadata": { + "id": "fvzqOzXVR7jX" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter MDisk-Link Below
\n", + "print(\"Downloading Cloud-Scraper...\")\n", + "!pip install cloudscraper\n", + "print(\"Importing Files!\")\n", + "import cloudscraper\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "print(\"You have Entered:\")\n", + "print(\"URL:\")\n", + "print(url)\n", + "print(\"Generating Direct-Download Link...\")\n", + "# -------------------------------------------\n", + "\n", + "def mdisk(url):\n", + " api = \"https://api.emilyx.in/api\"\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " resp = client.get(url)\n", + " if resp.status_code == 404:\n", + " return \"File not found/The link you entered is wrong!\"\n", + " try:\n", + " resp = client.post(api, json={\"type\": \"mdisk\", \"url\": url})\n", + " res = resp.json()\n", + " except BaseException:\n", + " return \"API UnResponsive / Invalid Link !\"\n", + " if res[\"success\"] is True:\n", + " return res[\"url\"]\n", + " else:\n", + " return res[\"msg\"]\n", + "\n", + "# -------------------------------------------\n", + "\n", + "res = mdisk(url)\n", + "\n", + "print(res)\n", + "print(\"Successfully Generated Direct-Download Link!\")" + ], + "metadata": { + "id": "WXQH2_YlSDkH", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter RockLinks Link Below!**" + ], + "metadata": { + "id": "IVaFnZycUqSY" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter Rocklinks-Link Below
\n", + "print(\"Downloading Cloud-Scraper...\")\n", + "!pip install cloudscraper\n", + "print(\"Setting Up!\")\n", + "print(\"Performing Check...\")\n", + "import time\n", + "import cloudscraper\n", + "from bs4 import BeautifulSoup \n", + "print(\"Everything Looks Good! Lets Continue.\")\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "\n", + " \n", + "\n", + "# ---------------------------------------------------------------------------------------------------------------------\n", + "\n", + "def bypass(url):\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " if 'rocklinks.net' in url:\n", + " DOMAIN = \"https://blog.disheye.com\"\n", + " else:\n", + " DOMAIN = \"https://rocklinks.net\"\n", + "\n", + " url = url[:-1] if url[-1] == '/' else url\n", + "\n", + " code = url.split(\"/\")[-1]\n", + " if 'rocklinks.net' in url:\n", + " final_url = f\"{DOMAIN}/{code}?quelle=\" \n", + " else:\n", + " final_url = f\"{DOMAIN}/{code}\"\n", + "\n", + " resp = client.get(final_url)\n", + " soup = BeautifulSoup(resp.content, \"html.parser\")\n", + " \n", + " try: inputs = soup.find(id=\"go-link\").find_all(name=\"input\")\n", + " except: return \"Incorrect Link\"\n", + " \n", + " data = { input.get('name'): input.get('value') for input in inputs }\n", + "\n", + " h = { \"x-requested-with\": \"XMLHttpRequest\" }\n", + " \n", + " time.sleep(10)\n", + " r = client.post(f\"{DOMAIN}/links/go\", data=data, headers=h)\n", + " try:\n", + " return r.json()['url']\n", + " except: return \"Something went wrong :(\"\n", + "\n", + "# ---------------------------------------------------------------------------------------------------------------------\n", + "res = bypass(url)\n", + "\n", + "print(res)\n", + "print(\"Successfully Bypassed!\")" + ], + "metadata": { + "id": "bG5bvLAxUui3", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter PixelDrain Link Below!**" + ], + "metadata": { + "id": "VvNupVRQwRIh" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter PixelDrain-Link Below
\n", + "print(\"Downloading Cloud-Scraper...\")\n", + "!pip install cloudscraper\n", + "print(\"Importing Files!\")\n", + "import cloudscraper\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "print(\"You have Entered:\")\n", + "print(\"URL:\")\n", + "print(url)\n", + "print(\"Generating Direct-Download Link...\")\n", + "# -------------------------------------------\n", + "\n", + "def pixeldrain(url):\n", + " api = \"https://api.emilyx.in/api\"\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " resp = client.get(url)\n", + " if resp.status_code == 404:\n", + " return \"File not found/The link you entered is wrong!\"\n", + " try:\n", + " resp = client.post(api, json={\"type\": \"pixeldrain\", \"url\": url})\n", + " res = resp.json()\n", + " except BaseException:\n", + " return \"API UnResponsive / Invalid Link !\"\n", + " if res[\"success\"] is True:\n", + " return res[\"url\"]\n", + " else:\n", + " return res[\"msg\"]\n", + "\n", + "# -------------------------------------------\n", + "\n", + "res = pixeldrain(url)\n", + "\n", + "print(res)\n", + "print(\"Successfully Generated Direct-Download Link!\")" + ], + "metadata": { + "id": "31TfucycwWaQ", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter WeTransfer Link Below!**" + ], + "metadata": { + "id": "U385CJEoxl0O" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter WeTransfer-Link Below
\n", + "print(\"Downloading Cloud-Scraper...\")\n", + "!pip install cloudscraper\n", + "print(\"Importing Files!\")\n", + "import cloudscraper\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "print(\"You have Entered:\")\n", + "print(\"URL:\")\n", + "print(url)\n", + "print(\"Generating Direct-Download Link...\")\n", + "# -------------------------------------------\n", + "\n", + "def wetransfer(url):\n", + " api = \"https://api.emilyx.in/api\"\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " resp = client.get(url)\n", + " if resp.status_code == 404:\n", + " return \"File not found/The link you entered is wrong!\"\n", + " try:\n", + " resp = client.post(api, json={\"type\": \"wetransfer\", \"url\": url})\n", + " res = resp.json()\n", + " except BaseException:\n", + " return \"API UnResponsive / Invalid Link !\"\n", + " if res[\"success\"] is True:\n", + " return res[\"url\"]\n", + " else:\n", + " return res[\"msg\"]\n", + "\n", + "# -------------------------------------------\n", + "\n", + "res = wetransfer(url)\n", + "\n", + "print(res)\n", + "print(\"Successfully Generated Direct-Download Link!\")" + ], + "metadata": { + "id": "-DDkk6qaxpst", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter MegaUp Link Below!**" + ], + "metadata": { + "id": "cGsBdNPLzE4I" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter MegaUp-Link Below
\n", + "print(\"Downloading Cloud-Scraper...\")\n", + "!pip install cloudscraper\n", + "print(\"Importing Files!\")\n", + "import cloudscraper\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "print(\"You have Entered:\")\n", + "print(\"URL:\")\n", + "print(url)\n", + "print(\"Generating Direct-Download Link...\")\n", + "# -------------------------------------------\n", + "\n", + "def megaup(url):\n", + " api = \"https://api.emilyx.in/api\"\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " resp = client.get(url)\n", + " if resp.status_code == 404:\n", + " return \"File not found/The link you entered is wrong!\"\n", + " try:\n", + " resp = client.post(api, json={\"type\": \"megaup\", \"url\": url})\n", + " res = resp.json()\n", + " except BaseException:\n", + " return \"API UnResponsive / Invalid Link !\"\n", + " if res[\"success\"] is True:\n", + " return res[\"url\"]\n", + " else:\n", + " return res[\"msg\"]\n", + "\n", + "# -------------------------------------------\n", + "\n", + "res = megaup(url)\n", + "\n", + "print(res)\n", + "print(\"Successfully Generated Direct-Download Link!\")" + ], + "metadata": { + "id": "jZYIX9-SzKHJ", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter Gyanilinks Link Below!**" + ], + "metadata": { + "id": "hzP92k6eACgP" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter gyanilinks Link Below
\n", + "print(\"Downloading Cloud-Scraper...\")\n", + "!pip install cloudscraper\n", + "print(\"Setting Up!\")\n", + "print(\"Performing Check...\")\n", + "import time\n", + "import cloudscraper\n", + "from bs4 import BeautifulSoup \n", + "print(\"Everything Looks Good! Lets Continue.\")\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "\n", + "\n", + "'''\n", + "NOTE: \n", + "SUPPORTED DOMAINS:\n", + " - gtlinks.me\n", + " \n", + "'''\n", + "# ---------------------------------------------------------------------------------------------------------------------\n", + "\n", + "def bypass(url):\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " if 'gtlinks.me' in url:\n", + " DOMAIN = \"https://go.bloggertheme.xyz\"\n", + " else:\n", + " return \"Incorrect Link\"\n", + "\n", + " url = url[:-1] if url[-1] == '/' else url\n", + "\n", + " code = url.split(\"/\")[-1]\n", + " \n", + " final_url = f\"{DOMAIN}/{code}\"\n", + "\n", + " resp = client.get(final_url)\n", + " soup = BeautifulSoup(resp.content, \"html.parser\")\n", + " \n", + " try: inputs = soup.find(id=\"go-link\").find_all(name=\"input\")\n", + " except: return \"Incorrect Link\"\n", + " \n", + " data = { input.get('name'): input.get('value') for input in inputs }\n", + "\n", + " h = { \"x-requested-with\": \"XMLHttpRequest\" }\n", + " \n", + " time.sleep(5)\n", + " r = client.post(f\"{DOMAIN}/links/go\", data=data, headers=h)\n", + " try:\n", + " return r.json()['url']\n", + " except: return \"Something went wrong :(\"\n", + "\n", + "# ---------------------------------------------------------------------------------------------------------------------\n", + "\n", + "res = bypass(url)\n", + "\n", + "print(res)\n", + "print(\"Successfully Bypassed!\")" + ], + "metadata": { + "id": "_iWf_OF9jOKq", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter Shortingly Link Below!**" + ], + "metadata": { + "id": "ew-UlUfi_eSv" + } + }, + { + "cell_type": "code", + "source": [ + "#@markdown
\n", + "#@title
Enter shortingly Link Below
\n", + "print(\"Downloading Cloud-Scraper...\")\n", + "!pip install cloudscraper\n", + "print(\"Setting Up!\")\n", + "print(\"Performing Check...\")\n", + "import time\n", + "import cloudscraper\n", + "from bs4 import BeautifulSoup \n", + "print(\"Everything Looks Good! Lets Continue.\")\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "\n", + "\n", + "'''\n", + "NOTE: \n", + "SUPPORTED DOMAINS:\n", + " - shortingly.me\n", + " \n", + "'''\n", + "# ---------------------------------------------------------------------------------------------------------------------\n", + "\n", + "def bypass(url):\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " if 'shortingly.me' in url:\n", + " DOMAIN = \"https://go.techyjeeshan.xyz\"\n", + " else:\n", + " return \"Incorrect Link\"\n", + "\n", + " url = url[:-1] if url[-1] == '/' else url\n", + "\n", + " code = url.split(\"/\")[-1]\n", + " \n", + " final_url = f\"{DOMAIN}/{code}\"\n", + "\n", + " resp = client.get(final_url)\n", + " soup = BeautifulSoup(resp.content, \"html.parser\")\n", + " \n", + " try: inputs = soup.find(id=\"go-link\").find_all(name=\"input\")\n", + " except: return \"Incorrect Link\"\n", + " \n", + " data = { input.get('name'): input.get('value') for input in inputs }\n", + "\n", + " h = { \"x-requested-with\": \"XMLHttpRequest\" }\n", + " \n", + " time.sleep(5)\n", + " r = client.post(f\"{DOMAIN}/links/go\", data=data, headers=h)\n", + " try:\n", + " return r.json()['url']\n", + " except: return \"Something went wrong :(\"\n", + "\n", + "# ---------------------------------------------------------------------------------------------------------------------\n", + "\n", + "res = bypass(url)\n", + "\n", + "print(res)\n", + "print(\"Successfully Bypassed!\")" + ], + "metadata": { + "id": "VLQNAB35oAGZ", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter ShareUs Link Below!**" + ], + "metadata": { + "id": "xOd-WNFWhs6w" + } + }, + { + "cell_type": "code", + "source": [ + "#@title
Enter ShareUs Link Below
\n", + "#@markdown
\n", + "import requests\n", + "\n", + "url = \"https://shareus.in/?i=y3wWSo\" #@param {type:\"string\"}\n", + "token = url.split(\"=\")[-1]\n", + "\n", + "bypassed_url = \"https://us-central1-my-apps-server.cloudfunctions.net/r?shortid=\"+ token\n", + "response = requests.get(bypassed_url).text\n", + "print(response)" + ], + "metadata": { + "cellView": "form", + "id": "nMbSeTR8gVdZ" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter AnonFiles Link Below!**" + ], + "metadata": { + "id": "EvqkOIgJbfr5" + } + }, + { + "cell_type": "code", + "source": [ + "#@title
Enter AnonFiles Link Below
\n", + "#@markdown
\n", + "import requests\n", + "\n", + "url = \"https://anonfiles.com/j7R7W5Pcnc/x9000_Deep_Web_Links\" #@param {type:\"string\"}\n", + "\n", + "headersList = { \"Accept\": \"*/*\"}\n", + "payload = \"\"\n", + "\n", + "response = requests.request(\"GET\", url, data=payload, headers=headersList).text.split(\"\\n\")\n", + "for ele in response:\n", + " if \"https://cdn\" in ele and \"anonfiles.com\" in ele and url.split(\"/\")[-2] in ele:\n", + " break\n", + "\n", + "print(ele.split('href=\"')[1].split('\"')[0])" + ], + "metadata": { + "cellView": "form", + "id": "n88drbVcKNOV" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter GoFile Link Below!**" + ], + "metadata": { + "id": "soH3D7fZveCX" + } + }, + { + "cell_type": "code", + "source": [ + "#@title
Enter GoFile Link Below
\n", + "#@markdown
\n", + "!pip install hashlib\n", + "\n", + "import hashlib\n", + "import requests\n", + "\n", + "def gofile_dl(url,password=\"\"):\n", + " api_uri = 'https://api.gofile.io'\n", + " client = requests.Session()\n", + " res = client.get(api_uri+'/createAccount').json()\n", + " \n", + " data = {\n", + " 'contentId': url.split('/')[-1],\n", + " 'token': res['data']['token'],\n", + " 'websiteToken': '12345',\n", + " 'cache': 'true',\n", + " 'password': hashlib.sha256(password.encode('utf-8')).hexdigest()\n", + " }\n", + " res = client.get(api_uri+'/getContent', params=data).json()\n", + "\n", + " content = []\n", + " for item in res['data']['contents'].values():\n", + " content.append(item)\n", + " \n", + " return {\n", + " 'accountToken': data['token'],\n", + " 'files': content\n", + " }[\"files\"][0][\"link\"]\n", + "\n", + "\n", + "url = \"\" #@param {type:\"string\"}\n", + "password = \"OPTIONAL\" #@param {type:\"string\"}\n", + "print(gofile_dl(url,password))" + ], + "metadata": { + "cellView": "form", + "id": "8_ZTsc0vvc1y" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter PSA link Below!**" + ], + "metadata": { + "id": "eUj7FRLCxT9k" + } + }, + { + "cell_type": "code", + "source": [ + "#@title
Enter PSA Link Below
\n", + "#@markdown
\n", + "\n", + "!pip install cloudscraper\n", + "!pip install bs4\n", + "\n", + "import cloudscraper\n", + "from bs4 import BeautifulSoup\n", + "import time\n", + "import re\n", + "\n", + "def try2link_bypass(url):\n", + "\tclient = cloudscraper.create_scraper(allow_brotli=False)\n", + "\t\n", + "\turl = url[:-1] if url[-1] == '/' else url\n", + "\t\n", + "\tparams = (('d', int(time.time()) + (60 * 4)),)\n", + "\tr = client.get(url, params=params, headers= {'Referer': 'https://newforex.online/'})\n", + "\t\n", + "\tsoup = BeautifulSoup(r.text, 'html.parser')\n", + "\tinputs = soup.find(id=\"go-link\").find_all(name=\"input\")\n", + "\tdata = { input.get('name'): input.get('value') for input in inputs }\t\n", + "\ttime.sleep(7)\n", + "\t\n", + "\theaders = {'Host': 'try2link.com', 'X-Requested-With': 'XMLHttpRequest', 'Origin': 'https://try2link.com', 'Referer': url}\n", + "\t\n", + "\tbypassed_url = client.post('https://try2link.com/links/go', headers=headers,data=data)\n", + "\treturn bypassed_url.json()[\"url\"]\n", + "\t\t\n", + "\n", + "def try2link_scrape(url):\n", + "\tclient = cloudscraper.create_scraper(allow_brotli=False)\t\n", + "\th = {\n", + "\t'upgrade-insecure-requests': '1', 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',\n", + "\t}\n", + "\tres = client.get(url, cookies={}, headers=h)\n", + "\turl = 'https://try2link.com/'+re.findall('try2link\\.com\\/(.*?) ', res.text)[0]\n", + "\treturn try2link_bypass(url)\n", + " \n", + "\n", + "def psa_bypasser(psa_url):\n", + " client = cloudscraper.create_scraper(allow_brotli=False)\n", + " r = client.get(psa_url)\n", + " soup = BeautifulSoup(r.text, \"html.parser\").find_all(class_=\"dropshadowboxes-drop-shadow dropshadowboxes-rounded-corners dropshadowboxes-inside-and-outside-shadow dropshadowboxes-lifted-both dropshadowboxes-effect-default\")\n", + " links = \"\"\n", + " for link in soup:\n", + " try:\n", + " exit_gate = link.a.get(\"href\")\n", + " links = links + try2link_scrape(exit_gate) + '\\n'\n", + " except: pass\n", + " return links\n", + "\n", + "url = \"https://psa.pm/movie/the-infernal-machine-2022/\" #@param {type:\"string\"}\n", + "print(psa_bypasser(url))\n" + ], + "metadata": { + "cellView": "form", + "id": "Mo7njiR0xCXt" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter OUO Link Below!**" + ], + "metadata": { + "id": "0FUIhDVl0Xri" + } + }, + { + "cell_type": "code", + "source": [ + "#@title
Enter OUO Link Below
\n", + "#@markdown
\n", + "\n", + "!pip install bs4\n", + "\n", + "import requests\n", + "import re\n", + "from urllib.parse import urlparse\n", + "from bs4 import BeautifulSoup\n", + "\n", + "# RECAPTCHA v3 BYPASS\n", + "# code from https://github.com/xcscxr/Recaptcha-v3-bypass\n", + "def RecaptchaV3(ANCHOR_URL):\n", + " url_base = 'https://www.google.com/recaptcha/'\n", + " post_data = \"v={}&reason=q&c={}&k={}&co={}\"\n", + " client = requests.Session()\n", + " client.headers.update({\n", + " 'content-type': 'application/x-www-form-urlencoded'\n", + " })\n", + " matches = re.findall('([api2|enterprise]+)\\/anchor\\?(.*)', ANCHOR_URL)[0]\n", + " url_base += matches[0]+'/'\n", + " params = matches[1]\n", + " res = client.get(url_base+'anchor', params=params)\n", + " token = re.findall(r'\"recaptcha-token\" value=\"(.*?)\"', res.text)[0]\n", + " params = dict(pair.split('=') for pair in params.split('&'))\n", + " post_data = post_data.format(params[\"v\"], token, params[\"k\"], params[\"co\"])\n", + " res = client.post(url_base+'reload', params=f'k={params[\"k\"]}', data=post_data)\n", + " answer = re.findall(r'\"rresp\",\"(.*?)\"', res.text)[0] \n", + " return answer\n", + "\n", + "\n", + "# code from https://github.com/xcscxr/ouo-bypass/\n", + "ANCHOR_URL = 'https://www.google.com/recaptcha/api2/anchor?ar=1&k=6Lcr1ncUAAAAAH3cghg6cOTPGARa8adOf-y9zv2x&co=aHR0cHM6Ly9vdW8uaW86NDQz&hl=en&v=1B_yv3CBEV10KtI2HJ6eEXhJ&size=invisible&cb=4xnsug1vufyr'\n", + "def ouo(url):\n", + " client = requests.Session()\n", + " tempurl = url.replace(\"ouo.press\", \"ouo.io\")\n", + " p = urlparse(tempurl)\n", + " id = tempurl.split('/')[-1]\n", + " \n", + " res = client.get(tempurl)\n", + " next_url = f\"{p.scheme}://{p.hostname}/go/{id}\"\n", + "\n", + " for _ in range(2):\n", + " if res.headers.get('Location'):\n", + " break\n", + " bs4 = BeautifulSoup(res.content, 'lxml')\n", + " inputs = bs4.form.findAll(\"input\", {\"name\": re.compile(r\"token$\")})\n", + " data = { input.get('name'): input.get('value') for input in inputs }\n", + " \n", + " ans = RecaptchaV3(ANCHOR_URL)\n", + " data['x-token'] = ans\n", + " h = {\n", + " 'content-type': 'application/x-www-form-urlencoded'\n", + " }\n", + " res = client.post(next_url, data=data, headers=h, allow_redirects=False)\n", + " next_url = f\"{p.scheme}://{p.hostname}/xreallcygo/{id}\"\n", + "\n", + " return res.headers.get('Location')\n", + "\n", + "url = \"https://ouo.press/Zu7Vs5\" #@param {type:\"string\"}\n", + "print(ouo(url))\n" + ], + "metadata": { + "cellView": "form", + "id": "XNKe8fWY0XMA" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter DropBox Link Below**" + ], + "metadata": { + "id": "Z8IwSNm-PA8U" + } + }, + { + "cell_type": "code", + "source": [ + "#@title
Enter DropBox Link Below
\n", + "#@markdown
\n", + "\n", + "url = \"https://www.dropbox.com/s/h5cte786r72zxel/apple-iphone-6-plus-1.jpg?dl=0\" #@param {type:\"string\"}\n", + "print(url.replace(\"www.\",\"\").replace(\"dropbox.com\",\"dl.dropboxusercontent.com\").replace(\"?dl=0\",\"\"))" + ], + "metadata": { + "cellView": "form", + "id": "Vrsdrs5_O_T0" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter FileCrypt Link Below!**" + ], + "metadata": { + "id": "Eysjw_9wp8SA" + } + }, + { + "cell_type": "code", + "source": [ + "#@title
Enter FileCrypt Link Below
\n", + "#@markdown
\n", + "\n", + "\n", + "!pip install bs4\n", + "!pip install cloudscraper\n", + "\n", + "from bs4 import BeautifulSoup\n", + "import cloudscraper\n", + "import json\n", + "\n", + "client = cloudscraper.create_scraper(allow_brotli=False)\n", + "\n", + "\n", + "# by https://github.com/bipinkrish/filecrypt-bypass\n", + "def getlinks(dlc):\n", + " headers = {\n", + " 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0',\n", + " 'Accept': 'application/json, text/javascript, */*',\n", + " 'Accept-Language': 'en-US,en;q=0.5',\n", + " # 'Accept-Encoding': 'gzip, deflate',\n", + " 'X-Requested-With': 'XMLHttpRequest',\n", + " 'Origin': 'http://dcrypt.it',\n", + " 'Connection': 'keep-alive',\n", + " 'Referer': 'http://dcrypt.it/',\n", + " }\n", + "\n", + " data = {\n", + " 'content': dlc,\n", + " }\n", + "\n", + " response = client.post('http://dcrypt.it/decrypt/paste', headers=headers, data=data).json()[\"success\"][\"links\"]\n", + " links = \"\"\n", + " for link in response:\n", + " links = links + link + \"\\n\"\n", + " print(links)\n", + " return links\n", + "\n", + "\n", + "# by https://github.com/bipinkrish/filecrypt-bypass\n", + "def filecrypt(url):\n", + "\n", + " headers = {\n", + " \"authority\": \"filecrypt.co\",\n", + " \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n", + " \"accept-language\": \"en-US,en;q=0.9\",\n", + " \"cache-control\": \"max-age=0\",\n", + " \"content-type\": \"application/x-www-form-urlencoded\",\n", + " \"dnt\": \"1\",\n", + " \"origin\": \"https://filecrypt.co\",\n", + " \"referer\": url,\n", + " \"sec-ch-ua\": '\"Google Chrome\";v=\"105\", \"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"105\"',\n", + " \"sec-ch-ua-mobile\": \"?0\",\n", + " \"sec-ch-ua-platform\": \"Windows\",\n", + " \"sec-fetch-dest\": \"document\",\n", + " \"sec-fetch-mode\": \"navigate\",\n", + " \"sec-fetch-site\": \"same-origin\",\n", + " \"sec-fetch-user\": \"?1\",\n", + " \"upgrade-insecure-requests\": \"1\",\n", + " \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36\" \n", + " }\n", + " \n", + "\n", + " resp = client.get(url, headers=headers)\n", + " soup = BeautifulSoup(resp.content, \"html.parser\")\n", + "\n", + " buttons = soup.find_all(\"button\")\n", + " for ele in buttons:\n", + " line = ele.get(\"onclick\")\n", + " if line !=None and \"DownloadDLC\" in line:\n", + " dlclink = \"https://filecrypt.co/DLC/\" + line.split(\"DownloadDLC('\")[1].split(\"'\")[0] + \".html\"\n", + " break\n", + "\n", + " resp = client.get(dlclink,headers=headers)\n", + " getlinks(resp.text)\n", + "\n", + "\n", + "url= \"https://filecrypt.co/Container/73F6D9D43B.html\" #@param {type:\"string\"}\n", + "filecrypt(url)" + ], + "metadata": { + "cellView": "form", + "id": "s1aVxPqCp63Q" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter ZippyShare Link Below!**" + ], + "metadata": { + "id": "-4iLgcZIDAJ9" + } + }, + { + "cell_type": "code", + "source": [ + "#@title
Enter ZippyShare Link Below
\n", + "#@markdown
\n", + "\n", + "!pip install requests\n", + "\n", + "import requests\n", + "\n", + "url = \"https://www32.zippyshare.com/v/H41DL3pu/file.html\" #@param {type:\"string\"}\n", + "resp = requests.get(\"https://www32.zippyshare.com/v/H41DL3pu/file.html\").text\n", + "surl = resp.split(\"document.getElementById('dlbutton').href = \")[1].split(\";\")[0]\n", + "parts = surl.split(\"(\")[1].split(\")\")[0].split(\" \")\n", + "val = str(int(parts[0]) % int(parts[2]) + int(parts[4]) % int(parts[6]))\n", + "surl = surl.split('\"')\n", + "burl = url.split(\"zippyshare.com\")[0]\n", + "furl = burl + \"zippyshare.com\" + surl[1] + val + surl[-2]\n", + "print(furl)" + ], + "metadata": { + "cellView": "form", + "id": "EdOE4oDNC_oN" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Enter MediaFire Link Below!**" + ], + "metadata": { + "id": "KiN1bcyBGHLs" + } + }, + { + "cell_type": "code", + "source": [ + "#@title
Enter MediaFire Link Below
\n", + "#@markdown
\n", + "\n", + "!pip install requests\n", + "\n", + "import requests\n", + "import re\n", + "\n", + "def extractDownloadLink(contents):\n", + " for line in contents.splitlines():\n", + " m = re.search(r'href=\"((http|https)://download[^\"]+)', line)\n", + " if m:\n", + " return m.groups()[0]\n", + "\n", + "url = \"http://www.mediafire.com/?5tn6rebctezdqlt\" #@param {type:\"string\"}\n", + "res = requests.get(url, stream=True)\n", + "url = extractDownloadLink(res.text)\n", + "print(url)" + ], + "metadata": { + "cellView": "form", + "id": "DhXB-UZuGGZ9" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file