Commit: black formatting

aouinizied committed May 10, 2024
1 parent 4942c65 commit 5481132
Showing 22 changed files with 1,643 additions and 761 deletions.
63 changes: 37 additions & 26 deletions browser/extension_server.py
@@ -19,45 +19,55 @@
 import sys
 
 
-NFRequest = namedtuple('NFRequest', ['browser',
-                                     'timestamp',
-                                     'remote_ip',
-                                     'tab_id',
-                                     'request_id',
-                                     'tab_is_active',
-                                     'tab_url'])
+NFRequest = namedtuple(
+    "NFRequest",
+    [
+        "browser",
+        "timestamp",
+        "remote_ip",
+        "tab_id",
+        "request_id",
+        "tab_is_active",
+        "tab_url",
+    ],
+)
 
 
 class NFRequestHandler(BaseHTTPRequestHandler):
-    """ Handler for HTTP request from browser extension """
+    """Handler for HTTP request from browser extension"""
 
     def _set_headers(self):
-        """ headers setter """
-        self.send_header('Access-Control-Allow-Origin', '*')
-        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
+        """headers setter"""
+        self.send_header("Access-Control-Allow-Origin", "*")
+        self.send_header("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
         self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
         self.send_header("Access-Control-Allow-Headers", "content-type")
         self.end_headers()
 
     def do_OPTIONS(self):
-        """ OPTIONS handler """
+        """OPTIONS handler"""
         self.send_response(200)
         self._set_headers()
 
     def do_POST(self):
-        """ POST handler """
-        if self.path.endswith('.json') and (self.path.startswith('/nfstream-chrome') or
-                                            self.path.startswith('/nfstream-firefox')):
-            length = self.headers['content-length']
+        """POST handler"""
+        if self.path.endswith(".json") and (
+            self.path.startswith("/nfstream-chrome")
+            or self.path.startswith("/nfstream-firefox")
+        ):
+            length = self.headers["content-length"]
             data = json.loads(self.rfile.read(int(length)))
             self.send_response(200)
             self._set_headers()
-            request = NFRequest(data["browser"],
-                                float(data["timestamp"]),
-                                data["ip_address"],
-                                data["tab_id"],
-                                data["req_id"],
-                                data["tab_is_active"],
-                                data["tab_url"])
+            request = NFRequest(
+                data["browser"],
+                float(data["timestamp"]),
+                data["ip_address"],
+                data["tab_id"],
+                data["req_id"],
+                data["tab_is_active"],
+                data["tab_url"],
+            )
 
             # For sake of brevity, we print it only
             print(request)
@@ -72,19 +72,20 @@ def log_message(self, fmt, *args):
 
 
 class NFRequestServer(HTTPServer):
-    """ NFRequest HTTP server"""
+    """NFRequest HTTP server"""
 
     def __init__(self, *args):
        HTTPServer.__init__(self, *args)
        self.stopped = False
        # self.channel = channel
 
 
-if __name__ == '__main__':  # Mandatory if you are running on Windows Platform
+if __name__ == "__main__":  # Mandatory if you are running on Windows Platform
     try:
         port = int(sys.argv[1])
     except IndexError:  # not specified
         port = 28314
-    server_address = ('', port)  # localhost with configurable port
+    server_address = ("", port)  # localhost with configurable port
     server = NFRequestServer(server_address, NFRequestHandler)
     try:
         while not server.stopped:
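
Note: a hypothetical client for the handler above, useful for exercising it by hand. It POSTs the JSON fields do_POST reads, to a path that passes the /nfstream-chrome*.json check; the port is the script's 28314 default, and every field value below is invented.

# Hypothetical test client for extension_server.py (not part of this commit).
import json
import urllib.request

payload = {
    "browser": "chrome",           # invented values throughout
    "timestamp": 1715337600.0,
    "ip_address": "93.184.216.34",
    "tab_id": 1,
    "req_id": 42,
    "tab_is_active": True,
    "tab_url": "https://example.com/",
}
req = urllib.request.Request(
    "http://localhost:28314/nfstream-chrome.json",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
urllib.request.urlopen(req)  # the running server decodes and prints an NFRequest
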
9 changes: 4 additions & 5 deletions examples/csv_generator.py
@@ -17,10 +17,9 @@
 import sys
 
 
-if __name__ == '__main__':  # Mandatory if you are running on Windows Platform
+if __name__ == "__main__":  # Mandatory if you are running on Windows Platform
     path = sys.argv[1]
     print("nfstream processing started. Use Ctrl+C to interrupt and save.")
-    total_flows = NFStreamer(source=path,
-                             statistical_analysis=True,
-                             splt_analysis=10,
-                             performance_report=1).to_csv()
+    total_flows = NFStreamer(
+        source=path, statistical_analysis=True, splt_analysis=10, performance_report=1
+    ).to_csv()
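
Note: a minimal sketch of driving the same pipeline programmatically, assuming an invented pcap path; total_flows mirrors what the script binds from to_csv().

# Hedged programmatic equivalent of: python csv_generator.py capture.pcap
from nfstream import NFStreamer

total_flows = NFStreamer(
    source="capture.pcap",      # invented path: a pcap file or a live interface
    statistical_analysis=True,  # adds post-mortem statistical features
    splt_analysis=10,           # sequence features over the first 10 packets
    performance_report=1,
).to_csv()
print(total_flows, "flows exported")
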
8 changes: 4 additions & 4 deletions examples/flow_printer.py
@@ -17,16 +17,16 @@
 import sys
 
 
-if __name__ == '__main__':  # Mandatory if you are running on Windows Platform
+if __name__ == "__main__":  # Mandatory if you are running on Windows Platform
     input_filepaths = []
     for path in sys.argv[1:]:
         input_filepaths.append(path)
     if len(input_filepaths) == 1:  # Single file / Interface
         input_filepaths = input_filepaths[0]
 
-    flow_streamer = NFStreamer(source=input_filepaths,
-                               statistical_analysis=False,
-                               idle_timeout=1)
+    flow_streamer = NFStreamer(
+        source=input_filepaths, statistical_analysis=False, idle_timeout=1
+    )
     result = {}
     try:
         for flow in flow_streamer:
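
Note: the loop body is collapsed above; as a hedged sketch, the pattern this sets up looks like the following, where the source name and the tallying by application_name are assumptions about what result holds, not the script's actual body.

# Hedged sketch of consuming flow_streamer (illustrative, not the real tail).
from nfstream import NFStreamer

flow_streamer = NFStreamer(source="eth0", statistical_analysis=False, idle_timeout=1)
result = {}
try:
    for flow in flow_streamer:
        print(flow)  # each NFlow prints as a readable record
        result[flow.application_name] = result.get(flow.application_name, 0) + 1
except KeyboardInterrupt:
    print(result)
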
7 changes: 4 additions & 3 deletions examples/wfeatures_pandas.py
@@ -23,11 +23,12 @@
     sys.exit(1)
 
 
-
-if __name__ == '__main__':  # Mandatory if you are running on Windows Platform
+if __name__ == "__main__":  # Mandatory if you are running on Windows Platform
     path = sys.argv[1]
     print("nfstream processing started. Use Ctrl+C to interrupt and save.")
-    streamer = NFStreamer(source=path, active_timeout=41, udps=WFPlugin(active_timeout=41, levels=12))
+    streamer = NFStreamer(
+        source=path, active_timeout=41, udps=WFPlugin(active_timeout=41, levels=12)
+    )
     print("Converting to pandas...")
     df = streamer.to_pandas()
     print("Dataframe: ")
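
Note: attributes computed by an NFPlugin such as WFPlugin are exported under a "udps." column prefix, so the resulting dataframe can be inspected as below. This is a sketch continuing the snippet above; WFPlugin's exact column names are not visible in this diff.

# Assumes df was produced as above; "udps." is NFStream's prefix for plugin columns.
wf_columns = [col for col in df.columns if col.startswith("udps.")]
print(df[wf_columns].head())
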
28 changes: 16 additions & 12 deletions generate_results.py
@@ -24,22 +24,26 @@ def get_files_list(path):
     files = []
     for r, d, f in os.walk(path):
         for file in f:
-            if '.pcap' == file[-5:] or ".pcapng" == file[-7:]:  # Pick out only pcaps files
+            if (
+                ".pcap" == file[-5:] or ".pcapng" == file[-7:]
+            ):  # Pick out only pcaps files
                 files.append(os.path.join(r, file))
     files.sort()
     return files
 
 
-if __name__ == '__main__':  # Mandatory if you are running on Windows Platform
+if __name__ == "__main__":  # Mandatory if you are running on Windows Platform
     pcap_files = get_files_list(os.path.join("tests", "pcaps"))
     for pcap_file in tqdm(pcap_files):
-        df = NFStreamer(source=pcap_file, n_dissections=20, n_meters=1).to_pandas()[["id",
-                                                                                     "bidirectional_packets",
-                                                                                     "bidirectional_bytes",
-                                                                                     "application_name",
-                                                                                     "application_category_name",
-                                                                                     "application_is_guessed",
-                                                                                     "application_confidence"]]
-        df.to_csv(pcap_file.replace("pcaps",
-                                    "results"),
-                  index=False)
+        df = NFStreamer(source=pcap_file, n_dissections=20, n_meters=1).to_pandas()[
+            [
+                "id",
+                "bidirectional_packets",
+                "bidirectional_bytes",
+                "application_name",
+                "application_category_name",
+                "application_is_guessed",
+                "application_confidence",
+            ]
+        ]
+        df.to_csv(pcap_file.replace("pcaps", "results"), index=False)
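
Note: for comparison only, get_files_list could also be written with pathlib; this sketch matches the suffix checks above but is a stylistic alternative, not part of the commit.

# pathlib-based equivalent of get_files_list (illustrative alternative).
from pathlib import Path

def get_files_list(path):
    files = [
        str(p)
        for p in Path(path).rglob("*")
        if p.is_file() and p.suffix in (".pcap", ".pcapng")
    ]
    files.sort()
    return files
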
4 changes: 2 additions & 2 deletions nfstream/__init__.py
@@ -20,5 +20,5 @@
 
 # streamer module is the core module of nfstream package.
 __author__ = """Zied Aouini"""
-__email__ = '[email protected]'
-__version__ = '6.5.4'
+__email__ = "[email protected]"
+__version__ = "6.5.4"
26 changes: 14 additions & 12 deletions nfstream/anonymizer.py
@@ -19,14 +19,12 @@
 
 class NFAnonymizer(object):
     """
-    NFAnonymizer: NFStream anonymization implementation.
-    Anonymizer is initiated at each time to_csv or to_pandas is called with a random secret key (64 bytes).
-    Each specified column is anonymized using blake2b algorithm (digest_size: 64 bytes).
+    NFAnonymizer: NFStream anonymization implementation.
+    Anonymizer is initiated each time to_csv or to_pandas is called with a random secret key (64 bytes).
+    Each specified column is anonymized using the blake2b algorithm (digest_size: 64 bytes).
     """
-    __slots__ = ('_secret',
-                 '_cols_names',
-                 '_cols_index',
-                 "_enabled")
+
+    __slots__ = ("_secret", "_cols_names", "_cols_index", "_enabled")
 
     def __init__(self, cols_names):
         self._secret = secrets.token_bytes(64)
@@ -38,19 +36,23 @@ def __init__(self, cols_names):
 
     def process(self, flow):
         if self._enabled:
-            if self._cols_index is None:  # First flow, we extract indexes of cols to anonymize.
+            if (
+                self._cols_index is None
+            ):  # First flow, we extract indexes of cols to anonymize.
                 self._cols_index = []
                 for col_name in self._cols_names:
                     keys = flow.keys()
                     try:
                         self._cols_index.append(keys.index(col_name))
                     except ValueError:
-                        print("WARNING: NFlow do not have {} attribute. Skipping anonymization.")
+                        print(
+                            "WARNING: NFlow does not have {} attribute. Skipping anonymization.".format(
+                                col_name
+                            )
+                        )
             values = flow.values()
             for col_idx in self._cols_index:
                 if values[col_idx] is not None:
-                    values[col_idx] = blake2b(str(values[col_idx]).encode(),
-                                              digest_size=64,
-                                              key=self._secret).hexdigest()
+                    values[col_idx] = blake2b(
+                        str(values[col_idx]).encode(), digest_size=64, key=self._secret
+                    ).hexdigest()
             return values
         return flow.values()
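
Note: a minimal standalone sketch of the keyed-hash primitive the docstring describes: a fresh 64-byte secret per export, blake2b with a 64-byte digest. The column value below is invented.

# Illustrates the anonymization scheme used above.
import secrets
from hashlib import blake2b

secret = secrets.token_bytes(64)  # regenerated on every to_csv/to_pandas call
value = "192.168.1.7"             # e.g. a src_ip column value
digest = blake2b(value.encode(), digest_size=64, key=secret).hexdigest()
print(digest)  # stable within one export, unlinkable across exports
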
40 changes: 26 additions & 14 deletions nfstream/engine/engine.py
@@ -16,44 +16,56 @@
 from _lib_engine import ffi, lib
 
 
-def setup_capture(ffi, lib, source, snaplen, promisc, mode, error_child, group_id, socket_buffer_size):
-    capture = lib.capture_open(bytes(source, 'utf-8'), int(mode), error_child, socket_buffer_size)
+def setup_capture(
+    ffi, lib, source, snaplen, promisc, mode, error_child, group_id, socket_buffer_size
+):
+    capture = lib.capture_open(
+        bytes(source, "utf-8"), int(mode), error_child, socket_buffer_size
+    )
     if capture == ffi.NULL:
         return
-    fanout_set_failed = lib.capture_set_fanout(capture, int(mode), error_child, group_id)
+    fanout_set_failed = lib.capture_set_fanout(
+        capture, int(mode), error_child, group_id
+    )
     if fanout_set_failed:
         return
     timeout_set_failed = lib.capture_set_timeout(capture, int(mode), error_child)
     if timeout_set_failed:
         return
-    promisc_set_failed = lib.capture_set_promisc(capture, int(mode), error_child, int(promisc))
+    promisc_set_failed = lib.capture_set_promisc(
+        capture, int(mode), error_child, int(promisc)
+    )
     if promisc_set_failed:
         return
-    snaplen_set_failed = lib.capture_set_snaplen(capture, int(mode), error_child, snaplen)
+    snaplen_set_failed = lib.capture_set_snaplen(
+        capture, int(mode), error_child, snaplen
+    )
     if snaplen_set_failed:
         return
     return capture
 
 
 def setup_filter(capture, lib, error_child, bpf_filter):
-    """ Compile and setup BPF filter """
+    """Compile and setup BPF filter"""
     if bpf_filter is not None:
-        filter_set_failed = lib.capture_set_filter(capture, bytes(bpf_filter, 'utf-8'), error_child)
+        filter_set_failed = lib.capture_set_filter(
+            capture, bytes(bpf_filter, "utf-8"), error_child
+        )
         if filter_set_failed:
             return False
     return True
 
 
 def activate_capture(capture, lib, error_child, bpf_filter, mode):
-    """ Capture activation function """
+    """Capture activation function"""
     activation_failed = lib.capture_activate(capture, int(mode), error_child)
     if activation_failed:
         return False
     return setup_filter(capture, lib, error_child, bpf_filter)
 
 
 def setup_dissector(ffi, lib, n_dissections):
-    """ Setup dissector according to n_dissections value """
+    """Setup dissector according to n_dissections value"""
     if n_dissections:  # Dissection activated
         # Check that headers and loaded library match and initiate dissector.
         checker = ffi.new("struct dissector_checker *")
@@ -70,13 +82,13 @@ def setup_dissector(ffi, lib, n_dissections):
 
 
 def is_interface(val):
-    """ Check if val is a valid interface name and return it if true else None """
-    intf = lib.capture_get_interface(val.encode('ascii'))
+    """Check if val is a valid interface name and return it if true else None"""
+    intf = lib.capture_get_interface(val.encode("ascii"))
     if intf == ffi.NULL:
         return None
-    return ffi.string(intf).decode('ascii', 'ignore')
+    return ffi.string(intf).decode("ascii", "ignore")
 
 
 def create_engine():
-    """ engine creation function, return the loaded native nfstream engine and it's ffi interface"""
-    return ffi, lib
+    """engine creation function, return the loaded native nfstream engine and its ffi interface"""
+    return ffi, lib
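
Note: a heavily hedged sketch of how these helpers chain together. The module path, the error_child buffer type, and every parameter value below are assumptions; none of them appears in this diff.

# Hypothetical wiring of the capture helpers above (assumptions flagged inline).
from nfstream.engine.engine import create_engine, setup_capture, activate_capture

ffi, lib = create_engine()
error_child = ffi.new("char[256]")  # assumed error-buffer type
capture = setup_capture(
    ffi,
    lib,
    source="eth0",         # assumed: interface name or pcap path
    snaplen=65535,
    promisc=1,
    mode=0,                # assumed live-capture mode flag
    error_child=error_child,
    group_id=0,
    socket_buffer_size=0,  # assumed: 0 selects the library default
)
if capture is None or not activate_capture(capture, lib, error_child, "tcp port 443", 0):
    raise RuntimeError(ffi.string(error_child).decode("utf-8", "ignore"))
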