Delete parsers directory
This commit is contained in:
parent
fa0f2e17fb
commit
f16c0f3a82
@ -1,81 +0,0 @@
|
||||
# Privilege Escalation Awesome Scripts Parsers
|
||||
|
||||
These scripts allow you to transform the output of linpeas/macpeas/winpeas to JSON and then to PDF and HTML.
|
||||
|
||||
```bash
|
||||
python3 peass2json.py </path/to/executed_peass.out> </path/to/peass.json>
|
||||
python3 json2pdf.py </path/to/peass.json> </path/to/peass.pdf>
|
||||
python3 json2html.py </path/to/peass.json> </path/to/peass.html>
|
||||
```
|
||||
|
||||
|
||||
## JSON Format
|
||||
Basically, **each section has**:
|
||||
- Infos (URLs or info about the section)
|
||||
- Text lines (the real text info found in the section, colors included)
|
||||
- More sections
|
||||
|
||||
There is a **maximum of 3 levels of sections**.
|
||||
|
||||
```json
|
||||
{
|
||||
"<Main Section Name>": {
|
||||
"sections": {
|
||||
"<Secondary Section Name>": {
|
||||
"sections": {},
|
||||
"lines": [
|
||||
{
|
||||
"raw_text": "\u001b[0m\u001b[1;33m[+] \u001b[1;32mnmap\u001b[1;34m is available for network discover & port scanning, you should use it yourself",
|
||||
"clean_text": "[+] is available for network discover & port scanning, you should use it yourself",
|
||||
"colors": {
|
||||
"GREEN": [
|
||||
"nmap"
|
||||
],
|
||||
"YELLOW": [
|
||||
"[+]"
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"infos": [
|
||||
"https://book.hacktricks.xyz/linux-hardening/privilege-escalation#kernel-exploits"
|
||||
]
|
||||
},
|
||||
"infos": []
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"System Information": {
|
||||
"sections": {
|
||||
"Operative system": {
|
||||
"sections": {},
|
||||
"lines": [
|
||||
{
|
||||
"raw_text": "\u001b[0m\u001b[1;33m[+] \u001b[1;32mnmap\u001b[1;34m is available for network discover & port scanning, you should use it yourself",
|
||||
"clean_text": "[+] is available for network discover & port scanning, you should use it yourself",
|
||||
"colors": {
|
||||
"GREEN": [
|
||||
"nmap"
|
||||
],
|
||||
"YELLOW": [
|
||||
"[+]"
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"infos": [
|
||||
"https://book.hacktricks.xyz/linux-hardening/privilege-escalation#kernel-exploits"
|
||||
]
|
||||
},
|
||||
"infos": []
|
||||
```
|
||||
|
||||
|
||||
There can also be a `<Third level Section Name>`
|
||||
|
||||
If you need to transform several outputs check out https://github.com/mnemonic-re/parsePEASS
|
||||
|
||||
# TODO:
|
||||
|
||||
- **PRs improving the code and the aspect of the final PDFs and HTMLs are always welcome!**
|
File diff suppressed because one or more lines are too long
@ -1,162 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
import json
|
||||
import html
|
||||
from reportlab.lib.pagesizes import letter
|
||||
from reportlab.platypus import Frame, Paragraph, Spacer, PageBreak,PageTemplate, BaseDocTemplate
|
||||
from reportlab.platypus.tableofcontents import TableOfContents
|
||||
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
|
||||
from reportlab.lib.units import cm
|
||||
|
||||
# Shared ReportLab sample stylesheet; individual functions derive styles from it.
styles = getSampleStyleSheet()
# Map PEASS color names to the hex values used when rendering colored spans in the PDF.
text_colors = { "GREEN": "#00DB00", "RED": "#FF0000", "REDYELLOW": "#FFA500", "BLUE": "#0000FF",
"DARKGREY": "#5C5C5C", "YELLOW": "#ebeb21", "MAGENTA": "#FF00FF", "CYAN": "#00FFFF", "LIGHT_GREY": "#A6A6A6"}
|
||||
|
||||
# Required to automatically set Page Numbers
|
||||
# Required to automatically set Page Numbers
class PageTemplateWithCount(PageTemplate):
    """Page template that stamps the current page number on every page."""

    def __init__(self, id, frames, **kw):
        PageTemplate.__init__(self, id, frames, **kw)

    def beforeDrawPage(self, canvas, doc):
        # Draw the page counter near the bottom of the page before content is laid out.
        current_page = canvas.getPageNumber()
        canvas.drawRightString(10.5*cm, 1*cm, str(current_page))
|
||||
|
||||
# Required to automatically set the Table of Contents
|
||||
# Required to automatically set the Table of Contents
class MyDocTemplate(BaseDocTemplate):
    """Document template that feeds Heading1..Heading3 paragraphs into the TOC."""

    def __init__(self, filename, **kw):
        self.allowSplitting = 0
        BaseDocTemplate.__init__(self, filename, **kw)
        # Single page layout with a numbered footer (see PageTemplateWithCount).
        page_template = PageTemplateWithCount(
            "normal", [Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')])
        self.addPageTemplates(page_template)

    def afterFlowable(self, flowable):
        # Only heading paragraphs become Table of Contents entries.
        if flowable.__class__.__name__ != "Paragraph":
            return
        toc_depth_by_style = {"Heading1": 0, "Heading2": 1, "Heading3": 2}
        style_name = flowable.style.name
        if style_name in toc_depth_by_style:
            entry = (toc_depth_by_style[style_name], flowable.getPlainText(), self.page)
            self.notify("TOCEntry", entry)
|
||||
|
||||
|
||||
# Dynamically generate paragraph styles depending on section depth.
def get_level_styles(level):
    """Return the ParagraphStyles used to render a section at *level* depth.

    Args:
        level: Section nesting level (1 = top level). Deeper levels get a
            progressively larger left indent (10pt per extra level).

    Returns:
        dict with "title", "text" and "info" ParagraphStyle entries derived
        from the module-level sample stylesheet.
    """
    indent_value = 10 * (level - 1)
    # Overriding some default stylings: clone the base style's attributes and
    # merge in the per-level overrides.
    level_styles = {
        "title": ParagraphStyle(
            **dict(styles[f"Heading{level}"].__dict__,
                   **{"leftIndent": indent_value})),
        "text": ParagraphStyle(
            **dict(styles["Code"].__dict__,
                   **{"backColor": "#F0F0F0",
                      "borderPadding": 5, "borderWidth": 1,
                      "borderColor": "black", "borderRadius": 5,
                      "leftIndent": 5 + indent_value})),
        "info": ParagraphStyle(
            **dict(styles["Italic"].__dict__,
                   **{"leftIndent": indent_value})),
    }
    return level_styles
|
||||
|
||||
def get_colors_by_text(colors):
    """Invert a {color: [words]} mapping into {html-escaped word: color}."""
    # Words are HTML-escaped here so they can be matched against escaped lines later.
    return {
        html.escape(word): color
        for color, words in colors.items()
        for word in words
    }
|
||||
|
||||
def build_main_section(section, title, level=1):
    """Recursively build the list of reportlab flowables for one PEASS section.

    Args:
        section: dict with optional "infos", "lines" and "sections" keys
            (the JSON structure produced by peass2json.py).
        title: Section heading text.
        level: Nesting depth (1 = top level); controls indentation and spacing.

    Returns:
        list of reportlab flowables (Paragraphs and Spacers) for this section
        and all of its children.
    """
    # NOTE: this local deliberately shadows the module-level `styles` stylesheet.
    styles = get_level_styles(level)
    has_links = "infos" in section.keys() and len(section["infos"]) > 0
    # NOTE(review): "> 1" (not "> 0") — presumably the first line entry is an
    # empty "clean_text" placeholder; confirm against peass2json output.
    has_lines = "lines" in section.keys() and len(section["lines"]) > 1
    has_children = "sections" in section.keys() and len(section["sections"].keys()) > 0

    # Only display data for Sections with results
    show_section = has_lines or has_children

    elements = []

    if show_section:
        elements.append(Paragraph(title, style=styles["title"]))

    # Print info if any
    if show_section and has_links:
        for info in section["infos"]:
            words = info.split()
            # Join all lines and encode any links that might be present.
            words = map(lambda word: f'<a href="{word}" color="blue">{word}</a>' if "http" in word else word, words)
            words = " ".join(words)
            elements.append(Paragraph(words, style=styles["info"] ))

    # Print lines if any
    if "lines" in section.keys() and len(section["lines"]) > 1:
        colors_by_line = list(map(lambda x: x["colors"], section["lines"]))
        # Escape line text first; colored substrings were escaped the same way.
        lines = list(map(lambda x: html.escape(x["clean_text"]), section["lines"]))
        for (idx, line) in enumerate(lines):
            colors = colors_by_line[idx]
            colored_text = get_colors_by_text(colors)
            colored_line = line
            # Wrap each colored substring in a <font> tag; REDYELLOW is also bolded.
            for (text, color) in colored_text.items():
                if color == "REDYELLOW":
                    colored_line = colored_line.replace(text, f'<font color="{text_colors[color]}"><b>{text}</b></font>')
                else:
                    colored_line = colored_line.replace(text, f'<font color="{text_colors[color]}">{text}</font>')
            lines[idx] = colored_line
        elements.append(Spacer(0, 10))
        line = "<br/>".join(lines)

        # If it's a top level entry remove the line break caused by an empty "clean_text"
        # (len("<br/>") == 5, hence the slice).
        if level == 1: line = line[5:]
        elements.append(Paragraph(line, style=styles["text"]))


    # Print child sections (recurse one level deeper per child).
    if has_children:
        for child_title in section["sections"].keys():
            element_list = build_main_section(section["sections"][child_title], child_title, level + 1)
            elements.extend(element_list)

    # Add spacing at the end of section. The deeper the level the smaller the spacing.
    if show_section:
        elements.append(Spacer(1, 40 - (10 * level)))

    return elements
|
||||
|
||||
|
||||
def main():
    """Build the PEAS PDF report from the parsed JSON file."""
    # Read and parse the JSON produced by peass2json.py.
    with open(JSON_PATH) as json_file:
        data = json.load(json_file)

    # Default pdf values
    doc = MyDocTemplate(PDF_PATH)
    toc = TableOfContents()
    toc.levelStyles = [
        ParagraphStyle(name="Heading1", fontSize=14, leading=16),
        ParagraphStyle(name="Heading2", fontSize=12, leading=14, leftIndent=10),
        ParagraphStyle(name="Heading3", fontSize=10, leading=12, leftIndent=20),
    ]

    # Report header, table of contents, then one group of flowables per section.
    elements = [Paragraph("PEAS Report", style=styles["Title"]), Spacer(0, 30), toc, PageBreak()]

    # Iterate over all top level sections and build their elements.
    for section_title, section in data.items():
        elements.extend(build_main_section(section, section_title))

    # multiBuild runs extra passes so the Table of Contents page numbers resolve.
    doc.multiBuild(elements)
|
||||
|
||||
# Start execution
|
||||
# Start execution: read CLI arguments, then build the PDF.
if __name__ == "__main__":
    try:
        JSON_PATH = sys.argv[1]
        PDF_PATH = sys.argv[2]
    except IndexError:
        # Missing CLI arguments: print usage and exit with an error status.
        print("Error: Please pass the peas.json file and the path to save the pdf\njson2pdf.py <json_file> <pdf_file.pdf>")
        sys.exit(1)

    main()
|
@ -1,168 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import sys
|
||||
import re
|
||||
import json
|
||||
|
||||
# Pattern to identify main section titles
TITLE1_PATTERN = r"══════════════╣" # The size of the first pattern varies, but at least should be that large
TITLE2_PATTERN = r"╔══════════╣"
TITLE3_PATTERN = r"══╣"
# Info lines (links/notes) start with this marker.
INFO_PATTERN = r"╚ "
# Box-drawing characters stripped from titles by clean_title().
TITLE_CHARS = ['═', '╔', '╣', '╚']

# Patterns for colors
## The order is important, the first string colored with a color will be the one selected (the same string cannot be colored with different colors)
COLORS = {
    "REDYELLOW": ['\x1b[1;31;103m'],
    "RED": ['\x1b[1;31m'],
    "GREEN": ['\x1b[1;32m'],
    "YELLOW": ['\x1b[1;33m'],
    "BLUE": ['\x1b[1;34m'],
    "MAGENTA": ['\x1b[1;95m', '\x1b[1;35m'],
    "CYAN": ['\x1b[1;36m', '\x1b[1;96m'],
    "LIGHT_GREY": ['\x1b[1;37m'],
    "DARKGREY": ['\x1b[1;90m'],
}


# Final JSON structure
FINAL_JSON = {}

#Constructing the structure: cursors into FINAL_JSON updated by parse_line().
# C_SECTION always points at the section that new lines/infos belong to;
# C_MAIN_SECTION / C_2_SECTION / C_3_SECTION track the last seen section at
# each nesting level.
C_SECTION = FINAL_JSON
C_MAIN_SECTION = FINAL_JSON
C_2_SECTION = FINAL_JSON
C_3_SECTION = FINAL_JSON
|
||||
|
||||
|
||||
|
||||
|
||||
def is_section(line: str, pattern: str) -> bool:
    """Return True when *pattern* occurs anywhere inside *line*."""
    return pattern in line
|
||||
|
||||
def get_colors(line: str) -> dict:
    """Given a line return the colored strings.

    Splits *line* on each known ANSI color code (see COLORS) and collects the
    text fragment that follows each code, up to the next escape sequence.

    Returns:
        dict mapping color name -> list of strings shown in that color;
        colors with no matches are omitted.
    """

    colors = {}
    for c,regexs in COLORS.items():
        colors[c] = []
        for reg in regexs:
            # Despite the name, `reg` is used as a plain substring separator.
            split_color = line.split(reg)

            # Start from the index 1 as the index 0 isn't colored
            if split_color and len(split_color) > 1:
                split_color = split_color[1:]

                # For each potential color, find the string before any possible color termination
                for potential_color_str in split_color:
                    color_str1 = potential_color_str.split('\x1b')[0]
                    # NOTE(review): "\[0" is a literal backslash-[0 separator (not a
                    # regex and not the ESC byte) — confirm this is the intended match.
                    color_str2 = potential_color_str.split("\[0")[0]
                    # Keep the shorter candidate, i.e. the text up to the nearest terminator.
                    color_str = color_str1 if len(color_str1) < len(color_str2) else color_str2

                    if color_str:
                        color_str = clean_colors(color_str.strip())
                        #Avoid having the same color for the same string
                        if color_str and not any(color_str in values for values in colors.values()):
                            colors[c].append(color_str)

        # Drop colors that matched nothing so the JSON stays compact.
        if not colors[c]:
            del colors[c]

    return colors
|
||||
|
||||
def clean_title(line: str) -> str:
    """Strip box-drawing title characters and non-ASCII bytes from a title."""
    for title_char in TITLE_CHARS:
        line = line.replace(title_char, "")

    # Drop any remaining non-ascii characters, then surrounding whitespace.
    ascii_only = line.encode("ascii", "ignore").decode()
    return ascii_only.strip()
|
||||
|
||||
def clean_colors(line: str) -> str:
    """Remove ANSI color escape sequences from *line* and trim whitespace."""

    for escape_seq in re.findall(r'\x1b\[[^a-zA-Z]+\dm', line):
        line = line.replace(escape_seq, "")

    # A stray ESC byte or a reset/italic code sometimes survives the regex pass.
    for leftover in ('\x1b', "[0m", "[3m"):
        line = line.replace(leftover, "")

    return line.strip()
|
||||
|
||||
|
||||
def parse_title(line: str) -> str:
    """Normalize a section title: drop box-drawing chars, then color codes."""

    without_chars = clean_title(line)
    return clean_colors(without_chars)
|
||||
|
||||
|
||||
def parse_line(line: str):
    """Parse the given line adding it to the FINAL_JSON structure.

    Depending on the decoration detected, the line is treated as a level
    1/2/3 section title (updating the section cursors), an info line for the
    current section, or plain text appended to the current section's lines.
    """

    global FINAL_JSON, C_SECTION, C_MAIN_SECTION, C_2_SECTION, C_3_SECTION

    if is_section(line, TITLE1_PATTERN):
        # New top-level section: reset the main and current cursors to it.
        title = parse_title(line)
        FINAL_JSON[title] = { "sections": {}, "lines": [], "infos": [] }
        C_MAIN_SECTION = FINAL_JSON[title]
        C_SECTION = C_MAIN_SECTION

    elif is_section(line, TITLE2_PATTERN):
        title = parse_title(line)
        C_MAIN_SECTION["sections"][title] = { "sections": {}, "lines": [], "infos": [] }
        C_2_SECTION = C_MAIN_SECTION["sections"][title]
        C_SECTION = C_2_SECTION

    elif is_section(line, TITLE3_PATTERN):
        title = parse_title(line)
        C_2_SECTION["sections"][title] = { "sections": {}, "lines": [], "infos": [] }
        C_3_SECTION = C_2_SECTION["sections"][title]
        C_SECTION = C_3_SECTION

    elif is_section(line, INFO_PATTERN):
        title = parse_title(line)
        C_SECTION["infos"].append(title)

    #If here, then it's text
    else:
        #If no main section parsed yet, pass
        if C_SECTION == {}:
            return

        C_SECTION["lines"].append({
            "raw_text": line,
            "colors": get_colors(line),
            "clean_text": clean_title(clean_colors(line))
        })
|
||||
|
||||
|
||||
def main():
    """Parse the PEASS output file line by line and dump the result as JSON."""
    # Use a context manager so the input handle is closed deterministically
    # (the original leaked the file object returned by open()).
    with open(OUTPUT_PATH, 'r', encoding="utf8") as output_file:
        for line in output_file:
            line = line.strip()
            if not line or not clean_colors(line): #Remove empty lines or lines just with colors hex
                continue

            parse_line(line)

    with open(JSON_PATH, "w") as f:
        json.dump(FINAL_JSON, f)
|
||||
|
||||
|
||||
# Start execution
|
||||
# Start execution: read CLI arguments, then parse the PEASS output.
if __name__ == "__main__":
    try:
        OUTPUT_PATH = sys.argv[1]
        JSON_PATH = sys.argv[2]
    except IndexError:
        # Missing CLI arguments: print usage and exit with an error status.
        print("Error: Please pass the peas.out file and the path to save the json\npeas2json.py <output_file> <json_file.json>")
        sys.exit(1)

    main()
|
Loading…
Reference in New Issue
Block a user