Set the default number of processes #3

Merged: 4 commits, Mar 8, 2024
20 changes: 13 additions & 7 deletions create_process.py
@@ -6,32 +6,35 @@
@Desc : This module handles multiprocess scheduling of tasks
'''
import os
+import js_finder
import url_scanner
import multiprocessing
from pathlib import Path
from colorama import Fore, Style
from tqdm import tqdm

class Scanner():
-    def __init__(self, urls:list[str], proxy:str|None, max_cpu:int|None, force_cpu:int|None) -> None:
+    def __init__(self, urls:list[str], proxy:str|None, max_cpu:int|None, force_cpu:int|None, find:bool) -> None:
        self.urls = urls
        self.total = len(urls)
        self.finish = 0
        self.proxy = proxy
        self.max_cpu = max_cpu
        self.force_cpu = force_cpu
+        self.find = find
        self.res_file_path = Path('result.txt')
    def worker(self,args:tuple[str,str]) -> str:
-        return url_scanner.scan_urls(args[0],args[1])
+        return url_scanner.scan_urls(args[0],args[1],args[2])
    def start(self) -> None:
        cpu_count = multiprocessing.cpu_count()
        cpu_count *= 5
-        cpu_count = self.max_cpu if self.max_cpu is not None and self.max_cpu <= cpu_count * 5 else cpu_count
+        cpu_count = self.max_cpu if self.max_cpu is not None and self.max_cpu <= cpu_count else cpu_count
        cpu_count = 60 if cpu_count > 60 else cpu_count  # more than 63 processes causes an error
        cpu_count = self.force_cpu if self.force_cpu is not None else cpu_count
        try:
            with multiprocessing.Pool(cpu_count) as pool:
-                res_list = pool.imap_unordered(self.worker,[(i,self.proxy) for i in self.urls])
+                js_finder.delete_files_in_js_directory()  # clean up the JS files
+                res_list = pool.imap_unordered(self.worker,[(i,self.proxy,self.find) for i in self.urls])
                for res in tqdm(res_list, total=self.total, desc='Current progress'):
                    self.finish += 1
                    os.system(f'title Progress: {self.finish}/{self.total}')
@@ -42,8 +45,11 @@ def start(self) -> None:
            print(f'{Fore.GREEN}Program terminated; results saved to {self.res_file_path}{Fore.RESET}')
            os.system('pause')

-def start(urls:list[str], proxy:str|None, max_cpu:str|None, force_cpu:str|None) -> None:
-    max_cpu = int(max_cpu) if max_cpu is not None else None
+def start(urls:list[str], proxy:str|None, max_cpu:str|None, force_cpu:str|None, find:str|None) -> None:
+    max_cpu = int(max_cpu) if max_cpu is not None else 2  # defaults to 2 processes
    force_cpu = int(force_cpu) if force_cpu is not None else None
-    scanner = Scanner(urls,proxy=proxy,max_cpu=max_cpu,force_cpu=force_cpu)
+    if force_cpu is not None and force_cpu > 60:
+        print(f'{Fore.RED}Warning: the process count you set is too high and may cause errors on some systems{Fore.RESET}')
+    find = True if find is not None and (find == '1' or find.upper() == 'T') else False
+    scanner = Scanner(urls,proxy=proxy,max_cpu=max_cpu,force_cpu=force_cpu,find=find)
    scanner.start()
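For reference, the pool sizing above works out as follows: the requested count (default 2 when -c is absent) is capped at CPU cores * 5 and then at 60, while --force_cpu bypasses both caps. A minimal standalone sketch of the same arithmetic; the helper name is illustrative, not part of the PR:

import multiprocessing

def effective_pool_size(max_cpu: int | None, force_cpu: int | None) -> int:
    # Mirrors Scanner.start(): cap the request at CPU cores * 5, then at 60
    # (the comment in the diff notes that more than 63 processes errors out),
    # unless force_cpu overrides both caps.
    cap = multiprocessing.cpu_count() * 5
    size = max_cpu if max_cpu is not None and max_cpu <= cap else cap
    size = min(size, 60)
    return force_cpu if force_cpu is not None else size

# On an 8-core machine: effective_pool_size(2, None) == 2 (the new default),
# effective_pool_size(100, None) == 40, effective_pool_size(None, 70) == 70.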
4 changes: 2 additions & 2 deletions findinfo.py
@@ -4,11 +4,11 @@
import sys
import os

-def scan_findinfo() -> list[str]:
+def scan_findinfo(id:str) -> list[str]:
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"}

-    fileurl = "/findinfo/JS"
+    fileurl = f"/findinfo/JS/{id}"

    filemkdir = fileurl.split('_')[0]
    if not os.path.exists(filemkdir):
2 changes: 1 addition & 1 deletion help.txt
@@ -2,6 +2,6 @@
Special note: this script currently only targets ASP.NET sites
Editors currently supported for scanning:
kindeditor ueditor fckeditor ckeditor ckfinder Cuteditor
-Multiprocessing is based on the number of CPU cores; use -c to set the process count (it cannot exceed the core count), or --force_cpu to force a value
+By default 2 processes are started; use -c to set the process count (it cannot exceed CPU cores * 5), or --force_cpu to force a value
Thanks to L@2uR1te for the technical support on the JSfinder module
My QQ: 1723680383
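A couple of hypothetical invocations matching the flags described above and defined in main.py further down (the target URL and file name are illustrative):

python main.py -u http://example.com -c 4
python main.py -f targets.txt -p https://127.0.0.1:8080 -find 1
python main.py -f targets.txt --force_cpu 8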
4 changes: 3 additions & 1 deletion http_requests.py
@@ -5,7 +5,9 @@ def send_http_request(url, verify=False, timeout=5, headers=None, proxy=None):
        proxies = {"http": proxy, "https": proxy} if proxy else None
        response = requests.get(url, verify=verify, timeout=timeout, proxies=proxies)
        return response
-    except requests.exceptions.RequestException as e:
+    except requests.exceptions.Timeout:
+        return -1
+    except requests.exceptions.RequestException:
        return None

# Example usage
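After this change send_http_request has a three-way contract: a requests.Response on success, -1 on timeout, and None on any other request error. A minimal sketch of caller-side handling; the helper name and policy are illustrative, only the sentinel values come from this PR:

from http_requests import send_http_request

def fetch_status(url: str) -> int | None:
    response = send_http_request(url)
    if response == -1:  # timeout sentinel introduced in this PR
        print(f'Timed out: {url}')
        return None
    if response is None:  # other request errors still map to None
        print(f'Request failed: {url}')
        return None
    return response.status_code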
62 changes: 39 additions & 23 deletions js_finder.py
@@ -1,5 +1,8 @@
import requests
import os
+import time
+import traceback
+from pathlib import Path
from bs4 import BeautifulSoup
from config import Color
from colorama import Fore, Style
@@ -22,31 +25,42 @@
]

# Make sure the target directory exists
-js_dir = os.path.join('findinfo', 'js')
+js_dir = os.path.join('findinfo', 'JS')
os.makedirs(js_dir, exist_ok=True)

# Variable controlling the directory depth
max_directory_levels = 5

-def delete_files_in_js_directory():
-    findinfo_directory = os.path.join(os.getcwd(), 'findinfo')  # get the path of the findinfo directory
-    js_directory = os.path.join(findinfo_directory, 'JS')  # build the path of the JS directory
-
+def delete_files_in_js_directory(target_id: str|None = None):
+    # findinfo_directory = os.path.join(os.getcwd(), 'findinfo')  # get the path of the findinfo directory
+    findinfo_directory = Path.cwd() / 'findinfo'
+    # js_directory = os.path.join(findinfo_directory, 'JS')  # build the path of the JS directory
+    js_directory = findinfo_directory / 'JS'
+    def del_dir_and_files(dir_path: Path):
+        if not dir_path.exists():
+            return
+        for file_path in dir_path.iterdir():
+            file_path.unlink()
+        dir_path.rmdir()
    try:
        # Check whether the JS directory exists
-        if os.path.exists(js_directory):
+        if js_directory.exists():
            # Delete all files in the JS directory
-            for file_name in os.listdir(js_directory):
-                file_path = os.path.join(js_directory, file_name)
-                if os.path.isfile(file_path):
-                    os.remove(file_path)
+            if target_id is not None:
+                del_dir_and_files(js_directory / target_id)
+            else:
+                for id_path in js_directory.iterdir():
+                    if not id_path.is_dir():
+                        id_path.unlink()
+                        continue
+                    del_dir_and_files(id_path)
        else:
            print('No "JS" directory found under findinfo.')
    except Exception as e:
        print(f'An error occurred: {e}')


-def save_js_file(base_url, js_path):
+def save_js_file(base_url, js_path, id:str):
    try:
        # Fetch the JS file content
        js_url = urljoin(base_url, js_path)
@@ -58,16 +72,19 @@ def save_js_file(base_url, js_path):
        js_filename = os.path.basename(js_path)

        # Build the save path
-        save_path = os.path.join('findinfo', 'js', js_filename)
-
+        save_path = os.path.join('findinfo', 'js', id)
+        if not os.path.exists(save_path):
+            os.mkdir(save_path)
+        save_path = os.path.join(save_path, js_filename)
        # Save the JS file
        with open(save_path, 'wb') as js_file:  # save in binary mode
            js_file.write(js_content)
-    except Exception as e:
-        print(f"Error while saving the JS file")
+    except Exception:
+        print(f"Error while saving the JS file: {traceback.format_exc()}")


def get_js_paths(url, proxy=None, find=None) -> str:
+    id = str(time.time()).replace('.','')
    try:
        res = ''
        # Send a GET request to fetch the page content
@@ -106,7 +123,6 @@ def get_js_paths(url, proxy=None, find=None) -> str:
            for i in range(1, min(max_directory_levels + 1, len(directory_parts) - 1)):
                subpath = '/'.join(directory_parts[:i+1]) + '/'
                separated_dirs.add(subpath)
-
        # Print the paths of all JavaScript files
        for path in filtered_js_paths:
            matching_rule = None
@@ -122,7 +138,7 @@
            else:
                print("Target JS file:", path)
                # Save the JS file to the target directory
-                save_js_file(url, path)
+                save_js_file(url, path, id)

        # Write the separated, non-matching JavaScript directory paths to a file
        path = '\n'.join(i for i in separated_dirs)
@@ -140,17 +156,17 @@
        else:
            print("Another ValueError occurred:")
    except Exception as e:
-        print("Another error occurred:")
+        print("Another error occurred: {}".format(e))
+        raise

    # Continue with other operations
    # Extract sensitive information
-    if find == 1:
+    if find == True:
        print("Extracting sensitive information loaded from JS files")
-        vars = findinfo.scan_findinfo()
+        vars = findinfo.scan_findinfo(id)
        if len(vars) != 0:
-            res = '-'*30 + f'\n{url + " sensitive files":^30}\n' + '-'*30 + '\n' + '\n'.join(vars) + '\n'
-        # Delete all files under the js directory to keep the site's JS files unique
-        delete_files_in_js_directory()
+            res = '-'*30 + f'\n{url + " sensitive information":^30}\n' + '-'*30 + '\n' + '\n'.join(vars) + '\n'
+        delete_files_in_js_directory(id)

    print("Extracting JS file paths for brute forcing")
    return res
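The per-run id introduced here threads through the whole JS pipeline: get_js_paths stamps each scan with str(time.time()).replace('.',''), save_js_file downloads into an id-named folder, findinfo.scan_findinfo(id) reads that same folder, and delete_files_in_js_directory(id) removes only that run's files (note that save_js_file joins 'findinfo', 'js' while the rest of the module uses 'JS', which only coincides on case-insensitive filesystems). A minimal sketch of the lifecycle under those assumptions; the file name and content are illustrative:

import time
from pathlib import Path

run_id = str(time.time()).replace('.', '')  # e.g. '17098765431234'
run_dir = Path('findinfo') / 'JS' / run_id

run_dir.mkdir(parents=True, exist_ok=True)  # save_js_file creates this per run
(run_dir / 'app.js').write_text('var token = "...";')  # a downloaded JS file

# findinfo.scan_findinfo(run_id) would scan findinfo/JS/<run_id> at this point.

# delete_files_in_js_directory(run_id) then removes only this run's folder:
for f in run_dir.iterdir():
    f.unlink()
run_dir.rmdir()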
5 changes: 0 additions & 5 deletions js_separated.txt
@@ -1,5 +0,0 @@
-/Scripts/mySelect/
-/Scripts/libs/
-/Scripts/StatisticsCount/
-/Scripts/
-/Scripts/My97DatePicker/
14 changes: 7 additions & 7 deletions main.py
@@ -1,21 +1,21 @@
import argparse
import config
import sys  # add this import
+if sys.version_info.major < 3 or sys.version_info.minor < 10:
+    print('Please run with Python 3.10 or later')
+    sys.exit(1)
import create_process
from colorama import Fore, Style
from config import set_request_defaults, print_banner

-if sys.version_info.major < 3 or sys.version_info.minor < 10:
-    print('Please run with Python 3.10 or later')
-    sys.exit(1)

# Define the command-line arguments and help text with argparse
parser = argparse.ArgumentParser(description="script-scan (by 叫我十一大人)")
parser.add_argument("-u", "--url", help="Scan a single URL")
parser.add_argument("-f", "--file", help="Scan URLs from a text file")
parser.add_argument("-p", "--proxy", help="Use a proxy, e.g. https://127.0.0.1:8080")
-parser.add_argument("-find", "--findinfo", help="Extract sensitive information from JS files; set to 1 to enable")
-parser.add_argument('-c', '--cpu', help='Set the maximum number of processes; cannot exceed CPU cores * 5')
+parser.add_argument("-find", "--findinfo", help="Extract sensitive information from JS files; set to 1 or T to enable")
+parser.add_argument('-c', '--cpu', help='Set the maximum number of processes; cannot exceed CPU cores * 5; defaults to 2')
parser.add_argument('--force_cpu', help='Force the number of processes')
args = parser.parse_args()

@@ -25,11 +25,11 @@
if args.url:
    url_to_scan = args.url
-    create_process.start([url_to_scan], proxy=args.proxy, max_cpu=args.cpu, force_cpu=args.force_cpu)
+    create_process.start([url_to_scan], proxy=args.proxy, max_cpu=args.cpu, force_cpu=args.force_cpu, find=args.findinfo)
elif args.file:
    with open(args.file, 'r') as file:
        target_urls = [i for i in file.read().splitlines() if i.startswith('http')]
-    create_process.start(target_urls, proxy=args.proxy, max_cpu=args.cpu, force_cpu=args.force_cpu)
+    create_process.start(target_urls, proxy=args.proxy, max_cpu=args.cpu, force_cpu=args.force_cpu, find=args.findinfo)
else:
    # If no arguments were given, print the help message
    parser.print_help()
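Note that args.findinfo reaches create_process.start as a raw string (or None) and is only normalized to a bool there. A quick sketch of that truth table; the helper name is illustrative:

def parse_find(find: str | None) -> bool:
    # Mirrors the normalization in create_process.start()
    return find is not None and (find == '1' or find.upper() == 'T')

assert parse_find('1') and parse_find('T') and parse_find('t')
assert not parse_find('0') and not parse_find(None)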
18 changes: 9 additions & 9 deletions url_scanner.py
@@ -37,14 +37,14 @@
# Subpath folder
path_files_folder = 'path_files'

-def scan_urls(url, proxy=None, find=None) -> str:
+def scan_urls(url, proxy=None, find=False) -> str:
    res:list[str] = ['='*120 + '\n']
    try:
        print("Testing URL: "+url+"\n")
        global_main_paths_not_found = True
        response = send_http_request(url, verify=False, timeout=5, headers=config.custom_headers, proxy=proxy)  # pass the proxy parameter
-        if response is None:  # skip directly
-            print(f'{Fore.RED}Response error, skipping this site ({url}){Fore.RESET}')
+        if response == -1:  # skip directly
+            print(f'{Fore.RED}Response timed out, skipping this site ({url}){Fore.RESET}')
            return ['']
        if response.status_code == 200:
            for main_path in main_paths:
@@ -63,8 +63,8 @@ def scan_urls(url, proxy=None, find=None) -> str:
                #print(target_url)
                try:
                    response = send_http_request(target_url, verify=False, timeout=5, headers=config.custom_headers, proxy=proxy)
-                    if response is None:
-                        print(f'{Fore.RED}Response error, skipping this site{Fore.RESET}')
+                    if response == -1:
+                        print(f'{Fore.RED}Response timed out, skipping this site{Fore.RESET}')
                        break
                    if response.status_code in [200, 403]:
                        main_path_found = True
@@ -95,8 +95,8 @@ def scan_urls(url, proxy=None, find=None) -> str:
                    #print(response.status_code)  # print the status code
                    #print(response.headers)  # print the headers
                    #print(response.text)  # print the response body
-                    if response is None:
-                        print(f'{Fore.RED}Response error, skipping this site{Fore.RESET}')
+                    if response == -1:
+                        print(f'{Fore.RED}Response timed out, skipping this site{Fore.RESET}')
                        break
                    if response.status_code == 302:
                        # If the status code is 302, skip the current URL
@@ -180,8 +180,8 @@ def scan_urls(url, proxy=None, find=None) -> str:
            for payload in payloads:
                complete_url = url + js_path + payload
                response = send_http_request(complete_url, verify=False, timeout=10, headers=config.custom_headers, proxy=proxy)
-                if response is None:
-                    print(f'{Fore.RED}Response error, skipping this site{Fore.RESET}')
+                if response == -1:
+                    print(f'{Fore.RED}Response timed out, skipping this site{Fore.RESET}')
                    skip = True
                    break
                try: