Skip to content

Commit

Permalink
Add files via upload
Browse files Browse the repository at this point in the history
Added version 0.0.1
  • Loading branch information
f4rh4d-4hmed authored Mar 19, 2024
1 parent 7d3e52c commit 5132d03
Show file tree
Hide file tree
Showing 5 changed files with 165 additions and 0 deletions.
Binary file added FTP-Downloader.exe
Binary file not shown.
Binary file added Scraper.exe
Binary file not shown.
71 changes: 71 additions & 0 deletions main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
from colorama import init, Fore, Style
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Colorama setup: autoreset restores default styling after every print.
init(autoreset=True)

# ASCII-art banner shown once at startup.
banner = Fore.YELLOW + Style.BRIGHT + """
▒█▀▀▀ ▀▀█▀▀ ▒█▀▀█ ░░ ▒█▀▀▄ ▒█▀▀▀█ ▒█░░▒█ ▒█▄░▒█ ▒█░░░ ▒█▀▀▀█ ░█▀▀█ ▒█▀▀▄ ▒█▀▀▀ ▒█▀▀█
▒█▀▀▀ ░▒█░░ ▒█▄▄█ ▀▀ ▒█░▒█ ▒█░░▒█ ▒█▒█▒█ ▒█▒█▒█ ▒█░░░ ▒█░░▒█ ▒█▄▄█ ▒█░▒█ ▒█▀▀▀ ▒█▄▄▀
▒█░░░ ░▒█░░ ▒█░░░ ░░ ▒█▄▄▀ ▒█▄▄▄█ ▒█▄▀▄█ ▒█░░▀█ ▒█▄▄█ ▒█▄▄▄█ ▒█░▒█ ▒█▄▄▀ ▒█▄▄▄ ▒█░▒█
"""
print(banner)

# Author / version lines.
print(Fore.CYAN + Style.BRIGHT + "Created By: Farhad Ahmed")
print(Fore.GREEN + Style.BRIGHT + "Version: 0.0.1")

# URLs already crawled; guards scan_url() against revisiting pages
# and therefore against infinite recursion.
visited_urls = set()

# Root of the site the crawl starts from.
base_url = 'http://103.170.204.250/FILE/'

# Function to scan a URL
def scan_url(url):
# Check if URL has already been visited
if url in visited_urls:
return

# Add URL to visited set
visited_urls.add(url)

# Fetch HTML content
response = requests.get(url)

# Check if request was successful
if response.status_code == 200:
# Parse HTML
soup = BeautifulSoup(response.content, 'html.parser')

# Search for specified file formats
file_formats = ['.mp4', '.mkv', '.mp3', '.rar', '.zip', '.pkg', '.dod']
for link in soup.find_all('a'):
href = link.get('href')
full_url = urljoin(url, href) # Construct full URL

if any(format in href for format in file_formats):
# Save full URL to found.html
with open('urls.txt', 'a') as file:
file.write(f"{full_url}\n")
else:
# Recursively explore directory links
if href.endswith('/'):
scan_url(full_url)

else:
print(Fore.RED + f"Failed to fetch URL: {url}")

# Start scanning from the top URL (populates visited_urls as a side effect).
scan_url(base_url)

# A crawl that got past the root page has visited more than one URL;
# use that as the "finished successfully" signal.
# NOTE(review): this prints nothing when only the root was reachable.
if len(visited_urls) > 1:
    print(Fore.GREEN + "Scanning complete.")
94 changes: 94 additions & 0 deletions search.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
import os
import urllib.parse
import webbrowser
import requests
from colorama import init, Fore, Style

# Initialize Colorama
init()

def clear_screen():
    """Clear the terminal on both Windows ('cls') and POSIX ('clear')."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)

def search_urls(keyword):
    """Return URLs from 'url.lock' matching every word of *keyword*.

    Each line of 'url.lock' is one URL.  A line matches when every
    whitespace-separated word of *keyword* appears (case-insensitively)
    in either the URL or its title (the final path segment with the
    extension removed).  Returns an empty list when the file is missing.
    """
    matches = []
    if not os.path.exists('url.lock'):
        return matches

    # Hoisted out of the loop: the original re-split the keyword for
    # every line of the file.
    words = keyword.lower().split()

    with open('url.lock', 'r') as file:
        for line in file:
            url = line.strip()
            if not url:  # ignore blank lines instead of emitting '' results
                continue
            # Title = last path segment without its extension.  The
            # original's rfind('.') produced an empty title whenever the
            # last segment had no dot (the match on the full URL below
            # still covers that case).
            title = url.rsplit('/', 1)[-1].rsplit('.', 1)[0].lower()
            lowered_url = url.lower()
            if all(word in title or word in lowered_url for word in words):
                matches.append(url)
    return matches

def format_text(text):
    """Make *text* display-friendly: '%20' becomes a space, dots become spaces."""
    return text.replace('%20', ' ').replace('.', ' ')

def format_file_name(url):
    """Return a display name for *url*: the last path segment, URL-decoded,
    with the file extension removed and remaining dots turned into spaces.
    """
    file_name = urllib.parse.unquote(url.split('/')[-1])
    # Strip the extension BEFORE replacing dots.  The original replaced
    # dots first, which left rsplit('.') nothing to split on, so the
    # extension was never actually removed despite the comment.
    base = file_name.rsplit('.', 1)[0]
    return base.replace('.', ' ')

def download_file(url):
    """Download *url* into the current directory; open it in the browser on failure.

    The local file name is the URL's last path segment.  On any error the
    user is notified and the link is opened in the default browser instead.
    """
    file_name = url.split('/')[-1]
    try:
        # Stream in chunks with a timeout instead of buffering the whole
        # body in memory, and fail on HTTP errors BEFORE creating the
        # local file — the original opened the file first, leaving an
        # empty file behind whenever the request failed.
        response = requests.get(url, stream=True, timeout=30)
        response.raise_for_status()
        with open(file_name, 'wb') as f:
            for chunk in response.iter_content(chunk_size=65536):
                f.write(chunk)
        print(Fore.GREEN + f"File '{file_name}' downloaded successfully." + Style.RESET_ALL)
    except Exception as e:
        print(Fore.RED + f"An error occurred while downloading the file: {e}" + Style.RESET_ALL)
        print(Fore.YELLOW + "Opening the link in browser instead..." + Style.RESET_ALL)
        webbrowser.open(url)

def main():
    """Interactive loop: prompt for a search term, list matching URLs from
    'url.lock', and let the user download, open in browser, or print the
    chosen link.  Runs until interrupted (there is no quit menu option).
    """
    while True:
        clear_screen() # Clear the screen before displaying new content
        # NOTE(review): this only warns — the loop still prompts even when
        # the file is missing (search_urls will simply find nothing).
        if not os.path.exists('url.lock'):
            print(Fore.RED + "Error: 'url.lock' file not found." + Style.RESET_ALL)

        keyword = input(Fore.MAGENTA + "Author:Farhad Ahmed\nVersion:0.0.1\nEnter The Movie Or Animation Or Games Name: " + Style.RESET_ALL)

        matching_urls = search_urls(keyword)

        if matching_urls:
            print(Fore.CYAN + "Matching URLs:" + Style.RESET_ALL)
            # 1-based numbering so the list matches what the user types below.
            for i, url in enumerate(matching_urls, 1):
                print(f"{i}. {format_file_name(url)}")

            selected_index = input(Fore.MAGENTA + "Here Is the results\nType the number from list: " + Style.RESET_ALL).strip()
            if selected_index.isdigit():
                index = int(selected_index) - 1  # back to 0-based
                if 0 <= index < len(matching_urls):
                    selected_url = matching_urls[index]
                    print(Fore.GREEN + f"Selected URL: {selected_url}" + Style.RESET_ALL)
                    # Inner menu: re-prompt until a valid option is entered.
                    while True:
                        option = input("\nSelect an option:\n1. Download\n2. Open in browser\n3. Show link\nEnter option number: ").strip()
                        if option == '1':
                            download_file(selected_url)
                            break
                        elif option == '2':
                            webbrowser.open(selected_url)
                            break
                        elif option == '3':
                            print(selected_url)
                            break
                        else:
                            print(Fore.RED + "Invalid option. Please enter a valid option number." + Style.RESET_ALL)
                else:
                    print(Fore.RED + "Invalid index. Please enter a valid index." + Style.RESET_ALL)
            else:
                print(Fore.RED + "Invalid input. Please enter a number." + Style.RESET_ALL)
        else:
            print(Fore.YELLOW + "No matching URLs found." + Style.RESET_ALL)

# Run the interactive search loop only when executed as a script,
# not when imported as a module.
if __name__ == "__main__":
    main()
Binary file added url.zip
Binary file not shown.

0 comments on commit 5132d03

Please sign in to comment.