
Commit

add: init frontend + general fixes
domysh committed Jun 9, 2024
1 parent a1cef82 commit 91aed76
Showing 36 changed files with 1,672 additions and 125 deletions.
2 changes: 2 additions & 0 deletions backend/app.py
@@ -185,6 +185,8 @@ async def set_status(data: Dict[str, str|int|None]):
raise HTTPException(400, f"Invalid key {key}")
if key == "SETUP_STATUS" and config.SETUP_STATUS != SetupStatus.SETUP:
raise HTTPException(400, "Setup status cannot be changed back to setup")
if key == "SERVER_ID":
raise HTTPException(400, "Server ID cannot be changed")
if key == "SUBMITTER":
if not await Submitter.objects.get_or_none(id=data[key]):
raise HTTPException(400, "Submitter not found")
2 changes: 2 additions & 0 deletions backend/models/config.py
@@ -36,6 +36,8 @@ class Configuration(BaseModel):

SETUP_STATUS: SetupStatus = SetupStatus.SETUP

SERVER_ID: UUID = uuid4()

@property
def login_enabled(self):
return self.AUTHENTICATION_REQUIRED and self.SETUP_STATUS != SetupStatus.SETUP
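
Note that `uuid4()` in the class body is evaluated once, when the model class is defined, so every `Configuration` instance created in that process gets the same default `SERVER_ID`; a per-instance default would use Pydantic's `default_factory` instead. A minimal sketch (not part of this commit) contrasting the two:

```python
# Sketch only, not part of the commit: contrasting a class-body default with default_factory.
from uuid import UUID, uuid4

from pydantic import BaseModel, Field

class SharedDefault(BaseModel):
    SERVER_ID: UUID = uuid4()                       # evaluated once at class definition

class PerInstanceDefault(BaseModel):
    SERVER_ID: UUID = Field(default_factory=uuid4)  # evaluated for every new instance

assert SharedDefault().SERVER_ID == SharedDefault().SERVER_ID
assert PerInstanceDefault().SERVER_ID != PerInstanceDefault().SERVER_ID
```
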
5 changes: 5 additions & 0 deletions backend/models/flags.py
@@ -23,3 +23,8 @@ class FlagDTO(BaseModel):
submit_attempts: int = 0
attack: FlagDTOAttackDetails

class FlagStats(BaseModel):
timeout_flags: int
wait_flags: int
invalid_flags: int
ok_flags: int
15 changes: 15 additions & 0 deletions backend/routes/flags.py
@@ -18,6 +18,7 @@
from fastapi_pagination.bases import AbstractParams, BasePage, RawParams
from fastapi_pagination.types import GreaterEqualOne, GreaterEqualZero
from fastapi_pagination.utils import create_pydantic_model
import asyncio

T = TypeVar("T")

@@ -101,5 +102,19 @@ async def get_flags(
.order_by(Flag.attack.recieved_at.desc())
.order_by(Flag.id.desc())
)
@router.get("/stats", response_model=FlagStats)
async def get_flags():
results = await asyncio.gather(
Flag.objects.filter(Flag.status == FlagStatus.timeout.value).count(),
Flag.objects.filter(Flag.status == FlagStatus.wait.value).count(),
Flag.objects.filter(Flag.status == FlagStatus.invalid.value).count(),
Flag.objects.filter(Flag.status == FlagStatus.ok.value).count(),
)
return FlagStats(
timeout_flags=results[0],
wait_flags=results[1],
invalid_flags=results[2],
ok_flags=results[3]
)

add_pagination(router)
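
For context, the new `/stats` route returns one counter per `FlagStatus`. A hedged client-side example (the `/flags` prefix and the 5050 port are assumptions, not confirmed by this diff):

```python
# Hedged usage sketch, not part of the commit: the /flags prefix and the 5050 port
# are assumptions about how the router is mounted.
import httpx

resp = httpx.get("http://localhost:5050/flags/stats")
resp.raise_for_status()
stats = resp.json()  # keys mirror FlagStats: timeout_flags, wait_flags, invalid_flags, ok_flags
print(
    f"ok={stats['ok_flags']} wait={stats['wait_flags']} "
    f"invalid={stats['invalid_flags']} timeout={stats['timeout_flags']}"
)
```
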
6 changes: 5 additions & 1 deletion backend/submitter.py
@@ -104,7 +104,11 @@ async def run_submit_routine(flags: List[Flag]):

async def submit_flags_task():
if g.config.FLAG_TIMEOUT:
await Flag.objects.filter(Flag.attack.recieved_at < (datetime_now() - timedelta(seconds=g.config.FLAG_TIMEOUT)), Flag.status == FlagStatus.wait.value).update(status=FlagStatus.timeout.value)
await Flag.objects.filter(
(Flag.attack.recieved_at > (datetime_now() + timedelta(seconds=g.config.FLAG_TIMEOUT))) & (Flag.status == FlagStatus.wait.value)
).update(
status=FlagStatus.timeout.value, status_text="⚠️ Timeouted by Exploitfarm due to FLAG_TIMEOUT"
)
flags_to_submit = Flag.objects.filter(status=FlagStatus.wait.value).order_by(Flag.attack.recieved_at.asc())
if g.config.FLAG_SUBMIT_LIMIT is None:
flags_to_submit = await flags_to_submit.all()
17 changes: 17 additions & 0 deletions client/exploitfarm/utils/__init__.py
@@ -1,5 +1,8 @@
import os, socket
from fasteners import InterProcessLock
import sys
import psutil
import logging

DEFAULT_SERVER_PORT = 5050

@@ -29,3 +32,17 @@ def create_lock(name:str) -> InterProcessLock:
if not os.path.exists(base_path): os.makedirs(base_path)
return InterProcessLock(file_path)

def restart_program():
"""Restarts the current program, with file objects and descriptors
cleanup
"""

try:
p = psutil.Process(os.getpid())
for handler in p.get_open_files() + p.connections():
os.close(handler.fd)
except Exception as e:
pass

python = sys.executable
os.execl(python, python, *sys.argv)
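
The new `restart_program()` helper closes the file descriptors and sockets the current process holds (best effort, errors are ignored) and then re-executes the interpreter with the original arguments via `os.execl`. A hedged sketch of wiring it to a restart trigger (the event below is illustrative, not the commit's `restart_event` plumbing):

```python
# Hedged usage sketch, not part of the commit: restart_event here is a stand-in for
# whatever signal tells the client to reload itself.
import threading

from exploitfarm.utils import restart_program

restart_event = threading.Event()

def watch_for_restart() -> None:
    restart_event.wait()   # block until another thread requests a restart
    restart_program()      # does not return: the process is replaced via os.execl

threading.Thread(target=watch_for_restart, daemon=True).start()
```
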
124 changes: 67 additions & 57 deletions client/exploitfarm/utils/cmd/startxploit.py
@@ -13,30 +13,31 @@
from textual.reactive import var
from datetime import timedelta
import dateutil.parser
from copy import deepcopy

class g:
config: ClientConfig
memory: dict
exploit_config: ExploitConfig
print_queue: Queue
pool_size: int
exit_event = threading.Event()
exit_event: threading.Event
restart_event: threading.Event

def stop_screen():
g.exit_event.set()
g.print_queue.put("Stopping exploit execution..")

def start_exploit_tui(config: ClientConfig, memory: dict, exploit_config: ExploitConfig, print_queue: Queue, pool_size:int):
g.config = config
def start_exploit_tui(memory: dict, exploit_config: ExploitConfig, print_queue: Queue, pool_size:int, exit_event: threading.Event, restart_event: threading.Event):
g.memory = memory
g.exploit_config = exploit_config
g.print_queue = print_queue
g.pool_size = pool_size
XploitRun(g.config).run()
g.exit_event = exit_event
g.restart_event = restart_event
XploitRun().run()
stop_screen()

#TODO graph doesn't work!

class FlagGraph(PlotextPlot):
"""A plot of the flags submitted by the exploit."""

@@ -80,57 +81,54 @@ def replot(self) -> None:
self.plt.plot(self._time, self._data, marker=self.marker)
self.refresh()
def update(self) -> None:
try:
teams = g.memory.get("config", {}).get("teams", [])
default_cache = {"data_graph":[], "teams_index":{}, "tick_duration":g.config.status["config"]["TICK_DURATION"]}
graph_cache = g.memory.get("graph_cache", default_cache)

if graph_cache["tick_duration"] != g.config.status["config"]["TICK_DURATION"]:
graph_cache = default_cache

data_to_analyse = []
for team in teams:
team_key = f"team-{team['id']}"
history = g.memory.get(team_key, [])[graph_cache["teams_index"].get(team_key,0):].copy()
graph_cache["teams_index"][team_key] = graph_cache["teams_index"].get(team_key,0) + len(history)
for ele in history:
end_time = ele.get("end_time", None)
if end_time is None: continue
flags = ele.get("flags", 0)
if isinstance(end_time, str):
end_time = dateutil.parser.parse(end_time)
data_to_analyse.append((end_time, flags))
data_to_analyse = sorted(data_to_analyse, key=lambda x: x[0].timestamp())
if not data_to_analyse: return
data_graph = graph_cache["data_graph"]
if not data_graph:
data_graph = [ (data_to_analyse[0][0], 0) ]
dg_index = len(data_graph)-1
timeoffset = timedelta(seconds=g.config.status["config"]["TICK_DURATION"])
timeend = data_graph[-1][0]+timeoffset
for ele in data_to_analyse:
if ele[0] <= timeend:
data_graph[dg_index] = ((data_graph[dg_index][0], data_graph[dg_index][1]+ele[1]))
else:
data_graph.append((timeend, ele[1]))
timeend += timeoffset
dg_index += 1
graph_cache["data_graph"] = data_graph
g.memory["graph_cache"] = graph_cache

self._data = [ele[1] for ele in data_graph]
self._time = [ele[0].strftime("%d/%m/%Y %H:%M:%S.%f") for ele in data_graph]
self.replot()
except Exception as e:
g.print_queue.put(f"Error while updating graph: {e}")
return
config: ClientConfig = deepcopy(g.memory.get("config", None))
if config is None: return
teams = config.get("teams", [])
default_cache = {"data_graph":[], "teams_index":{}, "tick_duration":config["config"]["TICK_DURATION"]}
graph_cache = g.memory.get("graph_cache", default_cache)

if graph_cache["tick_duration"] != config["config"]["TICK_DURATION"]:
graph_cache = default_cache

data_to_analyse = []
for team in teams:
team_key = f"team-{team['id']}"
history = g.memory.get(team_key, [])[graph_cache["teams_index"].get(team_key,0):].copy()
graph_cache["teams_index"][team_key] = graph_cache["teams_index"].get(team_key,0) + len(history)
for ele in history:
end_time = ele.get("end_time", None)
if end_time is None: continue
flags = ele.get("flags", 0)
if isinstance(end_time, str):
end_time = dateutil.parser.parse(end_time)
data_to_analyse.append((end_time, flags))
data_to_analyse = sorted(data_to_analyse, key=lambda x: x[0].timestamp())
if not data_to_analyse: return
data_graph = graph_cache["data_graph"]
if not data_graph:
data_graph = [ (data_to_analyse[0][0], 0) ]
dg_index = len(data_graph)-1
timeoffset = timedelta(seconds=config["config"]["TICK_DURATION"])
timeend = data_graph[-1][0]+timeoffset
for ele in data_to_analyse:
if ele[0] <= timeend:
data_graph[dg_index] = ((data_graph[dg_index][0], data_graph[dg_index][1]+ele[1]))
else:
data_graph.append((timeend, ele[1]))
timeend += timeoffset
dg_index += 1
graph_cache["data_graph"] = data_graph
g.memory["graph_cache"] = graph_cache

self._data = [ele[1] for ele in data_graph]
self._time = [ele[0].strftime("%d/%m/%Y %H:%M:%S.%f") for ele in data_graph]
self.replot()

class XploitRun(App):

def __init__(self, config: ClientConfig):
def __init__(self):
super().__init__()
self.title = f"xFarm - Exploit execution of {g.exploit_config.name}"
self.config = config
self.title = f"xFarm - Exploit execution of {g.exploit_config.name}"

BINDINGS = [
("ctrl+c", "cancel()", "Close attack"),
@@ -235,14 +233,20 @@ def update_data(self):
table.rows.clear()
column_keys = table.add_columns(*[Text(text="\n\n"+ele, justify="center") for ele in self.COLUMNS])
while True:
config = g.memory.get("config", {})
config: ClientConfig = deepcopy(g.memory.get("config", None))
if config is None:
if g.exit_event.wait(timeout=1): return
continue
next_attack = g.memory.get("next_attack_at", None)
submitter_flag_queue = g.memory.get("submitter_flags_in_queue", None)
submitter_flag_queue = submitter_flag_queue if not submitter_flag_queue is None else "❓"
submitter_status = g.memory.get("submitter_status", None)
submitter_status = "🟢" if submitter_status else "[bold red]❌ ( Can't submit flags )[/]" if not submitter_status is None else "❓"
config_updater = g.memory.get("config_update", None)
config_updater = "🟢" if config_updater else "[bold red]❌ (Check server is alive)[/]" if not config_updater is None else "❓"
if config_updater == True:
config_updater = "🟢" if config.get("status", None) == "running" else "[bold red]⚠️ Server is not in running mode![/]"
else:
config_updater = "🟢" if config_updater else "[bold red]❌ (Check server is alive)[/]" if not config_updater is None else "❓"
this_time = dt.now(datetime.timezone.utc)
delta_next_attack = str(next_attack - this_time).split(".")[0]+" s" if next_attack and next_attack > this_time else "❓"
status_text = self.query_one("#status_text", Label)
@@ -255,7 +259,7 @@ def update_data(self):
f"Exploit timeout: [bold yellow]{g.memory.get('runtime_timeout', 'Unknown')} s[/]\t\t\t"
f"Tick Duration: [bold green]{config.get('config', {}).get('TICK_DURATION', 'Unknown')} s[/]\t\t\t"
f"Workers: [bold green]{g.pool_size}[/]\n"
f"Server connection: [bold green]{config_updater}[/]\t\t\t"
f"Server connection: [bold]{config_updater}[/]\t\t\t"
f"Next Attacks: [bold green]{delta_next_attack}[/]\t\t\t"
f"Flag Format: [bold green]{escape(config.get('config', {}).get('FLAG_REGEX', 'Unknown'))}[/]"
)
@@ -287,12 +291,18 @@ def flag_graph_update(self):
with self.batch_update():
self.query_one("#flags_graph", FlagGraph).update()
if g.exit_event.wait(timeout=10): return


def exit_listener(self):
if g.exit_event.wait():
stop_screen()
self.exit(0)

def on_ready(self) -> None:
"""Called when the DOM is ready."""
self.run_worker(self.update_data, thread=True)
self.run_worker(self.update_log, thread=True)
self.run_worker(self.flag_graph_update, thread=True)
self.run_worker(self.exit_listener, thread=True)

def compose(self) -> ComposeResult:
yield Header(self.title)
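
The TUI now receives shared `exit_event`/`restart_event` objects and runs an `exit_listener` worker, so setting `exit_event` from any thread shuts the app down. A minimal, self-contained sketch of that worker/event pattern (simplified names, not the commit's code):

```python
# Minimal sketch, not the commit's code: a thread worker blocks on a threading.Event
# and closes the Textual app when the event is set from anywhere in the process.
import threading

from textual.app import App, ComposeResult
from textual.widgets import Footer, Header, Label

exit_event = threading.Event()

class SketchApp(App):
    def exit_listener(self) -> None:
        exit_event.wait()                      # safe to block: this runs in a worker thread
        self.call_from_thread(self.exit, 0)    # hand the shutdown back to the UI thread

    def on_ready(self) -> None:
        self.run_worker(self.exit_listener, thread=True)

    def compose(self) -> ComposeResult:
        yield Header()
        yield Label("Waiting for exit_event to be set...")
        yield Footer()

if __name__ == "__main__":
    threading.Timer(5.0, exit_event.set).start()  # demo: trigger the shutdown after 5 s
    SketchApp().run()
```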