Skip to content

Commit

Permalink
Python3.8.* support, MacOS-sec-features-compliant, re-write PyScript …
Browse files Browse the repository at this point in the history
…and Nmap wrapper engines (#93)

* Update Nmap and PyScript engines, python3.8 support, MacOS-sec-features support

* Format with Black

* Add FreeType as a requirement

* Update README.md
  • Loading branch information
manmolecular authored Apr 3, 2020
1 parent f431819 commit 868b506
Show file tree
Hide file tree
Showing 4 changed files with 108 additions and 63 deletions.
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
<p align="center"><i>The basic CLI interface of the Grinder Framework</i></p>
</div>

## Contents
## Table of Contents
1. [Description](#description)
1. [Grinder Workflow](#grinder-workflow)
1. [Grinder Map](#grinder-map)
Expand Down Expand Up @@ -89,6 +89,7 @@ For example, the hosts will be automatically checked for availability with ping
### Basic
- :heavy_exclamation_mark: [Python 3.6+](https://www.python.org/downloads/)
- :heavy_exclamation_mark: [python3-tk](https://docs.python.org/3/library/tkinter.html) library
- :heavy_exclamation_mark: [FreeType](https://www.freetype.org/) library (required for Python 3.8+ on MacOS)
### Accounts
- :heavy_exclamation_mark: [Shodan](https://account.shodan.io/register) and [Censys](https://censys.io/register) accounts
Required to collect hosts; both free and full accounts are suitable. It is also possible to use only one account (either Censys or Shodan; Shodan is preferable).
Expand Down
22 changes: 19 additions & 3 deletions grinder.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,28 @@

import sys

from multiprocessing import freeze_support

from grinder.asciiart import AsciiOpener
from grinder.core import GrinderCore
from grinder.interface import GrinderInterface


class GrinderProcessWrap:
    """
    Workaround for the multiprocessing "RuntimeError"
    raised during the process bootstrapping phase on MacOS:
    defer the core import until after freeze_support() runs.
    """

    @staticmethod
    def import_core():
        """
        Import and return the GrinderCore class lazily.
        :return: the GrinderCore class object
        """
        # Must be called before spawning worker processes
        # (no-op outside frozen executables, but harmless)
        freeze_support()
        from grinder.core import GrinderCore as core_cls

        return core_cls


if __name__ == "__main__":
GrinderCore = GrinderProcessWrap.import_core()
AsciiOpener.print_opener()
interface = GrinderInterface()
interface.check_python_version()
Expand Down Expand Up @@ -34,8 +51,7 @@

search_results = (
core.batch_search(
queries_filename=args.queries_file,
not_incremental=args.not_incremental
queries_filename=args.queries_file, not_incremental=args.not_incremental
)
if args.run
else core.load_results(queries_filename=args.queries_file)
Expand Down
71 changes: 42 additions & 29 deletions grinder/nmapprocessmanager.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
#!/usr/bin/env python3

from multiprocessing import Process, JoinableQueue, Manager
from datetime import datetime
from multiprocessing import Process, JoinableQueue, Manager, freeze_support
from os import system
from time import sleep
from datetime import datetime

from grinder.decorators import exception_handler
from grinder.defaultvalues import DefaultProcessManagerValues
from grinder.errors import (
NmapProcessingRunError,
NmapProcessingManagerOrganizeProcessesError,
)
from grinder.nmapconnector import NmapConnector
from grinder.defaultvalues import DefaultProcessManagerValues


class NmapProcessingDefaultManagerValues:
Expand All @@ -21,15 +21,7 @@ class NmapProcessingDefaultManagerValues:

POLLING_RATE = 0.5
EMPTY_QUEUE_POLLING_RATE = 1.0


class NmapProcessingResults:
"""
This is results collector to gain
results directly from a process
"""

RESULTS = Manager().dict({})
PROCESS_TIMEOUT = 300


class NmapProcessing(Process):
Expand All @@ -49,13 +41,15 @@ def __init__(
ports: str,
sudo: bool,
hosts_quantity: int,
results_pool: dict,
):
Process.__init__(self)
self.queue = queue
self.arguments = arguments
self.ports = ports
self.sudo = sudo
self.quantity = hosts_quantity
self.results_pool = results_pool

@exception_handler(expected_exception=NmapProcessingRunError)
def run(self) -> None:
Expand All @@ -74,9 +68,15 @@ def run(self) -> None:
sleep(NmapProcessingDefaultManagerValues.EMPTY_QUEUE_POLLING_RATE)
continue
try:
index, host = self.queue.get()
# Poll with POLLING_RATE interval
sleep(NmapProcessingDefaultManagerValues.POLLING_RATE)

# Get host info from queue
index, host = self.queue.get()
if (index, host) == (None, None):
self.queue.task_done()
return

host_ip = host.get("ip", "")
host_port = host.get("port", "")
port_postfix = "Default"
Expand All @@ -103,13 +103,12 @@ def run(self) -> None:

results = nm.get_results()
if results.get(host_ip).values():
NmapProcessingResults.RESULTS.update(
{host_ip: results.get(host_ip)}
)
self.results_pool.update({host_ip: results.get(host_ip)})
except:
self.queue.task_done()
else:
self.queue.task_done()
pass
self.queue.task_done()
if self.queue.empty():
return


class NmapProcessingManager:
Expand All @@ -127,6 +126,9 @@ def __init__(
arguments=DefaultProcessManagerValues.ARGUMENTS,
workers=DefaultProcessManagerValues.WORKERS,
):
freeze_support()
self.manager = Manager()
self.results_pool = self.manager.dict({})
self.hosts = hosts
self.workers = workers
self.arguments = arguments
Expand All @@ -140,18 +142,31 @@ def organize_processes(self) -> None:
:return: None
"""
queue = JoinableQueue()
for index, host in enumerate(self.hosts):
queue.put((index, host))
processes = []
for _ in range(self.workers):
freeze_support()
process = NmapProcessing(
queue, self.arguments, self.ports, self.sudo, len(self.hosts)
queue,
self.arguments,
self.ports,
self.sudo,
len(self.hosts),
self.results_pool,
)
process.daemon = True
processes.append(process)
for process in processes:
process.start()
try:
process.start()
except OSError:
pass
for index, host in enumerate(self.hosts):
queue.put((index, host))
for _ in range(self.workers):
queue.put((None, None))
queue.join()
for process in processes:
process.join(timeout=NmapProcessingDefaultManagerValues.PROCESS_TIMEOUT)

def start(self) -> None:
"""
Expand All @@ -160,21 +175,19 @@ def start(self) -> None:
"""
self.organize_processes()

@staticmethod
def get_results() -> dict:
def get_results(self) -> dict:
"""
Return dictionary with Nmap results
:return: Nmap results
"""
return NmapProcessingResults.RESULTS
return self.results_pool

@staticmethod
def get_results_count() -> int:
def get_results_count(self) -> int:
"""
Return quantity of Nmap results
:return: quantity of results
"""
return len(NmapProcessingResults.RESULTS)
return len(self.results_pool)

def __del__(self):
"""
Expand Down
75 changes: 45 additions & 30 deletions grinder/pyscriptexecutor.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
#!/usr/bin/env python3

from multiprocessing import Process, JoinableQueue, Manager
from os import system
from contextlib import redirect_stdout, redirect_stderr
from importlib.machinery import SourceFileLoader
from types import ModuleType
from multiprocessing import Process, JoinableQueue, Manager, freeze_support
from os import system
from pathlib import Path
from contextlib import redirect_stdout, redirect_stderr
from time import sleep
from types import ModuleType

from grinder.decorators import exception_handler
from grinder.defaultvalues import DefaultValues
Expand All @@ -24,32 +24,27 @@ class PyProcessingValues:

POLLING_RATE = 0.5
EMPTY_QUEUE_POLLING_RATE = 1.0


class PyProcessingResults:
"""
Collect python scripts results
"""

RESULTS = Manager().dict({})
PROCESS_TIMEOUT = 300


class PyProcessing(Process):
"""
Create a custom process to run python scripts as an independent worker
"""

def __init__(self, queue: JoinableQueue, mute: bool = False):
def __init__(self, results_pool: dict, queue: JoinableQueue, mute: bool = False):
"""
Initialize the process worker
:param queue: general joinable task queue
:param mute: bool flag for running scripts in silent mode (w/o output at all)
:param results_pool: pool of results
"""
Process.__init__(self)
self.queue = queue
self.mute = mute
self.base_path = self._initialize_base_path()
self.results_pool = results_pool

@staticmethod
def _initialize_base_path() -> Path:
Expand Down Expand Up @@ -108,13 +103,18 @@ def run(self) -> None:
"Unknown script",
)
try:
# Poll with POLLING_RATE interval
sleep(PyProcessingValues.POLLING_RATE)

# Get host info from queue
current_progress, host_info, py_script = self.queue.get()
if (current_progress, host_info, py_script) == (None, None, None):
self.queue.task_done()
return

ip = host_info.get("ip")
port = host_info.get("port")

# Poll with POLLING_RATE interval
sleep(PyProcessingValues.POLLING_RATE)

# Setup logging
log_progress = f"[{current_progress[0]}/{current_progress[1]}] ({current_progress[2]})"
log_host = f"{ip}:{port}"
Expand All @@ -128,12 +128,12 @@ def run(self) -> None:
self.queue.task_done()
continue
try:
if ip not in PyProcessingResults.RESULTS.keys():
PyProcessingResults.RESULTS.update({ip: result})
if ip not in self.results_pool.keys():
self.results_pool.update({ip: result})
else:
old_result = PyProcessingResults.RESULTS.get(ip)
old_result = self.results_pool.get(ip)
old_result.update(result)
PyProcessingResults.RESULTS[ip] = old_result
self.results_pool[ip] = old_result
except (AttributeError, ConnectionRefusedError):
print(
f"{log_progress} -> Caught manager error on host {log_host}: simultaneous shared-dict call"
Expand All @@ -144,10 +144,11 @@ def run(self) -> None:
print(
f'{log_progress} -> script "{py_script}" crash for {log_host}: {str(script_err)}'
)
self.queue.task_done()
else:
print(f'{log_progress} -> script "{py_script}" done for {log_host}')
self.queue.task_done()
self.queue.task_done()
if self.queue.empty():
return


class PyProcessingManager:
Expand All @@ -170,6 +171,9 @@ def __init__(
:param workers: number of running processes
:param mute: bool flag for running scripts in silent mode (w/o output at all)
"""
freeze_support()
self.manager = Manager()
self.results_pool = self.manager.dict({})
self.ip_script_mapping = ip_script_mapping
self.hosts_info = hosts_info
self.workers = workers
Expand All @@ -183,10 +187,19 @@ def organize_processes(self) -> None:
:return: None
"""
queue = JoinableQueue()
processes = []
for _ in range(self.workers):
process = PyProcessing(queue, mute=self.mute)
freeze_support()
process = PyProcessing(
results_pool=self.results_pool, queue=queue, mute=self.mute
)
process.daemon = True
process.start()
processes.append(process)
for process in processes:
try:
process.start()
except OSError:
pass
hosts_length = len(self.hosts_info)
for index, (ip, host_info) in enumerate(self.hosts_info.items()):
py_script = self.ip_script_mapping.get(ip)
Expand Down Expand Up @@ -221,7 +234,11 @@ def organize_processes(self) -> None:
script_file,
)
)
for _ in range(self.workers):
queue.put((None, None, None))
queue.join()
for process in processes:
process.join(timeout=PyProcessingValues.PROCESS_TIMEOUT)

def start(self) -> None:
"""
Expand All @@ -231,23 +248,21 @@ def start(self) -> None:
"""
self.organize_processes()

@staticmethod
def get_results() -> dict:
def get_results(self) -> dict:
"""
Return process manager results
:return: dictionary with {ip: results} format
"""
return PyProcessingResults.RESULTS
return self.results_pool

@staticmethod
def get_results_count() -> int:
def get_results_count(self) -> int:
"""
Return overall quantity of results
:return: None
"""
return len(PyProcessingResults.RESULTS)
return len(self.results_pool)

def __del__(self) -> None:
"""
Expand Down

0 comments on commit 868b506

Please sign in to comment.