feat: config file + database connection / dockerfile
idps/database.py  (new file, 64 lines)
@@ -0,0 +1,64 @@
+import mysql.connector
+
+
+class Database:
+    """Class handling database-related actions (sending alerts, ...)"""
+
+    def __init__(self, config):
+        """Connect to the database using the credentials from the config"""
+        self.conn = mysql.connector.connect(host=config["db_host"], database=config["db_database"], user=config["db_user"], password=config["db_password"], port=config["db_port"])
+        self.config = config
+
+    def send_alert(self, alert):
+        """Add an alert to the database
+        @param alert: alert to insert into the database"""
+
+        try:
+            cursor = self.conn.cursor()
+            sql_query = """
+                INSERT INTO alertes (
+                    cef_version, date_alerte, event_gravite, device_product,
+                    device_vendor, device_version, alerte_name, sourceAddress,
+                    destinationAddress, destinationPort, sourcePort, protocol,
+                    applicationProtocol, reason, action, commentaire
+                ) VALUES (
+                    %(cef_version)s, %(date_alerte)s, %(event_gravite)s, %(device_product)s,
+                    %(device_vendor)s, %(device_version)s, %(alerte_name)s, %(src)s,
+                    %(dst)s, %(destinationPort)s, %(sourcePort)s, %(protocol)s,
+                    %(applicationProtocol)s, %(reason)s, %(action)s, %(commentaire)s
+                );
+            """
+
+            # Parameters for the SQL query
+            params = {
+                "cef_version": alert["CEF"],
+                "date_alerte": alert["datetime"],
+                "event_gravite": alert["agent_severity"],
+                "device_product": alert["Device Product"],
+                "device_vendor": alert["Device Vendor"],
+                "device_version": alert["Device Version"],
+                "alerte_name": alert["name"],
+                "src": alert["src"],
+                "dst": alert["dst"],
+                "destinationPort": alert["dstPort"],
+                "sourcePort": alert["srcPort"],
+                "protocol": alert["protocol"],
+                "applicationProtocol": alert["applicationProtocol"],
+                "reason": alert["reason"],
+                "action": alert["action"],
+                "commentaire": alert["commentaire"]
+            }
+
+            # Run the insert query
+            cursor.execute(sql_query, params)
+            self.conn.commit()
+            cursor.close()
+        except mysql.connector.Error as err:
+            print("Error while sending the alert: {}".format(err))
+
+    def get_key(self, key, default_val):
+        """Return the value of a specific config parameter
+        @param key: key of the requested parameter
+        @param default_val: default value if the key does not exist"""
+
+        return self.config.get(key, default_val)
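A minimal usage sketch of the new Database class (not part of the commit): the config keys match what __init__ reads and the alert fields match what send_alert expects, but every value below is a made-up placeholder.

import database

config = {"db_host": "localhost", "db_port": 3306, "db_database": "idps",
          "db_user": "idps", "db_password": "secret"}  # placeholder credentials
db = database.Database(config)

alert = {
    "CEF": 0, "datetime": "2025-01-01 12:00:00", "agent_severity": 5,
    "Device Product": "SIDPS", "Device Vendor": "SIDPS", "Device Version": "0.1",
    "name": "SYN scan detected", "src": "10.0.0.2", "dst": "10.0.0.1",
    "srcPort": 40512, "dstPort": 22, "protocol": "TCP",
    "applicationProtocol": "", "reason": "threshold exceeded", "action": "alert",
    "commentaire": ""
}
db.send_alert(alert)  # inserts one row into the alertes table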
idps/main.py  (65 lines changed)
@@ -1,14 +1,19 @@
 from scapy.all import sniff, TCP, IP
 from scapy.config import conf
 conf.debug_dissector = 2
 
 import importlib.util
 import os
 import time
 import tcp
+import database
+import json
 
 
 def load_rules(rules_dirpath = "/app/idps/rules"):
-    """Load the rule functions from the rules directory and its subdirectories"""
+    """Load the rule functions from the rules directory and its subdirectories
+    @param rules_dirpath: directory containing the Python rule files
+    """
 
     if not os.path.exists(rules_dirpath):
         raise ValueError(f"The specified path does not exist: {rules_dirpath}")
@@ -56,52 +61,84 @@ def load_rules(rules_dirpath = "/app/idps/rules"):
                    rules_functions[parent_dir].append(module.rule)
 
    except PermissionError:
-        print(f"Permission denied when accessing the directory: {current_dir}")
+        raise PermissionError(f"Permission denied when accessing the directory: {current_dir}")
    except OSError as e:
-        print(f"Error while accessing the directory {current_dir}: {e}")
+        raise OSError(f"Error while accessing the directory {current_dir}: {e}")
 
    return rules_functions
 
 
-def check_frame_w_rules(packet, rules_functions, packets):
-    """Apply every rule function to the captured packet."""
+def check_frame_w_rules(packet, rules_functions, packets, db):
+    """Apply every rule function to the captured packet.
+    @param packet: current packet to analyse
+    @param rules_functions: list of rule functions
+    @param packets: list of previous packets (useful for TCP)
+    @param db: database object used to send alerts to the database
+    """
 
    for rule_func in rules_functions:
        try:
-            rule_func(packet, packets)
+            rule_func(packet, packets, db)
        except Exception as e:
            print(f"Error while running the rule: {e}")
 
 
-def packet_callback(packet, rules_functions, tcp_packets):
+def packet_callback(packet, rules_functions, tcp_packets, db):
+    """Packet-received callback
+    @param packet: current packet to classify
+    @param rules_functions: list of rule functions
+    @param tcp_packets: object holding the list of previous TCP packets
+    @param db: database object used to send alerts to the database
+    """
 
    #print(packet)
    if IP in packet and TCP in packet:
        tcp_packets.add_packet(packet[IP].src, packet[TCP].sport, packet[IP].dst, packet[TCP].dport, packet[TCP].flags, time.time())
-        #print(tcp_packets[packet[IP].src])
-        check_frame_w_rules(packet, rules_functions['TCP'], tcp_packets)
+        print(tcp_packets[packet[IP].src])
+        check_frame_w_rules(packet, rules_functions['TCP'], tcp_packets, db)
        tcp_packets.clean_old_packets()
 
 
-def start_idps(IDS_IFACES = ["eth0","eth1"]):
+def read_config(config_filepath='config.json'):
+    """Load the configuration from the config file"""
+
+    try:
+        with open(config_filepath, 'r', encoding='utf-8') as file:
+            config = json.load(file)
+            return config
+    except FileNotFoundError:
+        raise FileNotFoundError(f"The JSON file {config_filepath} was not found.")
+    except json.JSONDecodeError as e:
+        raise json.JSONDecodeError(f"Error while reading the JSON file {config_filepath}; the format may be invalid.", e.doc, e.pos)
+
+
+def start_idps():
    """Load the rules and start the IDPS"""
 
+    print("Fetching the configuration")
+    config = read_config()
+    print("Configuration loaded")
+
    print("Loading the rules...")
-    rules_functions = load_rules()
+    rules_functions = load_rules(config["rules_dirpath"])
    print("Rules loaded")
 
+    print("Connecting to the database")
+    db = database.Database(config)
+    print("Successfully connected to the database")
+
    # Possible optimisation: load the rules per protocol, allowing filtering and thus limiting
    # the number of functions checking each packet (Snort stops at the first match, for example)
 
    tcp_packets = tcp.TCP(300)
 
    # Start scapy & hand each packet to every IDPS rule
-    sniff(iface=IDS_IFACES, prn=lambda packet: packet_callback(packet, rules_functions, tcp_packets), store=0)
-    #wrpcap("idps.pcap", capture)
+    sniff(iface=config["ifaces"], prn=lambda packet: packet_callback(packet, rules_functions, tcp_packets, db), store=0)
 
 
 def main():
    print("Starting the IDPS")
    start_idps()
    print("IDPS running")
 
 
 if __name__ == "__main__":
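The config.json file itself is not shown in this commit view; as a sketch, a configuration carrying every key referenced above (database credentials for Database, rules_dirpath and ifaces for start_idps, and the per-rule thresholds read through db.get_key) could look like the following, with all values being illustrative assumptions:

{
  "db_host": "db",
  "db_port": 3306,
  "db_database": "idps",
  "db_user": "idps",
  "db_password": "changeme",
  "rules_dirpath": "/app/idps/rules",
  "ifaces": ["eth0", "eth1"],
  "synscan_time": 180,
  "synscan_count": 5,
  "tcpconnectscan_time": 180,
  "tcpconnectscan_count": 5
}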
(SYN scan rule file, path not shown in this view)
@@ -1,8 +1,12 @@
-# Thresholds
-TIME_WINDOW = 180
-NB_SEUIL = 5
+def rule(packet, tcp_packets, db):
     """SYN scan rule:
     A SYN scan sends TCP requests with the SYN flag set.
     If the port is open, the server answers SYN ACK and the client then resets the connection.
     Otherwise the port is closed and the server answers RST ACK.
     """
 
+    time_window = db.get_key("synscan_time", 180)
+    seuil = db.get_key("synscan_count", 5)
 
-def rule(packet, tcp_packets):
-    if (tcp_packets.count_packet_of_type("RA", TIME_WINDOW) + tcp_packets.count_packet_of_type("SA", TIME_WINDOW)) + tcp_packets.count_packet_of_type("R", TIME_WINDOW) >= NB_SEUIL:
+    if (tcp_packets.count_packet_of_type("RA", time_window) + tcp_packets.count_packet_of_type("SA", time_window)) + tcp_packets.count_packet_of_type("R", time_window) >= seuil:
         print("Alert: thresholds exceeded, possible SYN scan")
(TCP connect scan rule file, path not shown in this view)
@@ -1,8 +1,11 @@
-# Thresholds
-TIME_WINDOW = 180 # 180 seconds to gather X packets
-NB_SEUIL = 5
+def rule(packet, tcp_packets, db):
     """TCP connect scan rule:
     A TCP connect scan performs a full TCP connection on every scanned port.
     If the port is open, the server accepts the connection: SYN -> SYN ACK -> ACK -> Reset -> ACK.
     Otherwise the port is closed and the server refuses the connection: SYN -> Reset ACK.
     """
+    time_window = db.get_key("tcpconnectscan_time", 180)
+    seuil = db.get_key("tcpconnectscan_count", 5)
 
 
-def rule(packet, tcp_packets):
-    if (tcp_packets.count_packet_of_type("A", TIME_WINDOW) + tcp_packets.count_packet_of_type("RA", TIME_WINDOW)) >= NB_SEUIL:
+    if (tcp_packets.count_packet_of_type("A", time_window) + tcp_packets.count_packet_of_type("RA", time_window)) >= seuil:
         print("Alert: thresholds exceeded, possible TCP connect scan")
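For orientation, rules are plain Python modules picked up by load_rules and grouped by their parent directory (main.py only dispatches the 'TCP' group so far); after this commit each module must expose rule(packet, tcp_packets, db). A hypothetical skeleton for an additional rule, with an invented filename and invented config keys:

# idps/rules/TCP/example_scan.py  -- hypothetical file, for illustration only
def rule(packet, tcp_packets, db):
    """Describe the traffic pattern this rule detects."""
    time_window = db.get_key("example_time", 180)  # invented config key
    seuil = db.get_key("example_count", 5)         # invented config key

    if tcp_packets.count_packet_of_type("S", time_window) >= seuil:
        print("Alert: thresholds exceeded, example rule")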
idps/tcp.py  (27 lines changed)
@@ -17,18 +17,18 @@ class TCP:
            self.packets[ip_src] = []
 
        if flags == "S":
-            self.packets[ip_src].append([port_src, ip_dst, port_dst, flags, timestamp])
+            self.packets[ip_src].append([port_src, ip_dst, port_dst, [flags], timestamp])
            return
 
        elif flags == "SA":
            i = self.find_packet_to_replace(ip_src, port_src, ip_dst, port_dst, "S", True)
 
            if i is not None:
-                self.packets[ip_dst][i][3] = "SA"
+                self.packets[ip_dst][i][3].append("SA")
                self.packets[ip_dst][i][4] = timestamp
                return
            else:
-                self.packets[ip_src].append([port_src, ip_dst, port_dst, flags, timestamp])
+                self.packets[ip_src].append([port_src, ip_dst, port_dst, [flags], timestamp])
                return
 
        elif flags == "A":
@@ -37,11 +37,11 @@ class TCP:
            i = self.find_packet_to_replace(ip_src, port_src, ip_dst, port_dst, "R", True)
 
            if i is not None:
-                self.packets[ip_src][i][3] = "A"
+                self.packets[ip_src][i][3].append("A")
                self.packets[ip_src][i][4] = timestamp
                return
            else:
-                self.packets[ip_src].append([port_src, ip_dst, port_dst, flags, timestamp])
+                self.packets[ip_src].append([port_src, ip_dst, port_dst, [flags], timestamp])
                return
 
        elif flags == "RA":
@@ -51,11 +51,11 @@ class TCP:
            i = self.find_packet_to_replace(ip_src, port_src, ip_dst, port_dst, "S")
 
            if i is not None:
-                self.packets[ip_src][i][3] = "RA"
+                self.packets[ip_src][i][3].append("RA")
                self.packets[ip_src][i][4] = timestamp
                return
            else:
-                self.packets[ip_src].append([port_src, ip_dst, port_dst, flags, timestamp])
+                self.packets[ip_src].append([port_src, ip_dst, port_dst, [flags], timestamp])
                return
 
        elif flags == "R":
@@ -65,11 +65,11 @@ class TCP:
            i = self.find_packet_to_replace(ip_src, port_src, ip_dst, port_dst, "S")
 
            if i is not None:
-                self.packets[ip_src][i][3] = "R"
+                self.packets[ip_src][i][3].append("R")
                self.packets[ip_src][i][4] = timestamp
                return
            else:
-                self.packets[ip_src].append([port_src, ip_dst, port_dst, flags, timestamp])
+                self.packets[ip_src].append([port_src, ip_dst, port_dst, [flags], timestamp])
                return
 
    def find_packet_to_replace(self, ip_src, port_src, ip_dst, port_dst, flags, reverse=False):
@@ -78,8 +78,11 @@ class TCP:
            ip_src, ip_dst = ip_dst, ip_src
            port_src, port_dst = port_dst, port_src
 
+        if ip_src not in self.packets.keys():
+            return None
+
        for i, [p_s, ip_d, p_d, f, stamp] in enumerate(self.packets[ip_src]):
-            if p_s == port_src and ip_d == ip_dst and p_d == port_dst and f == flags:
+            if p_s == port_src and ip_d == ip_dst and p_d == port_dst and flags in f:
                return i
        return None
 
@@ -110,9 +113,11 @@ class TCP:
        current_timestamp = time.time()
        for ip in list(self.packets.keys()):
            for packet in self.packets[ip]:
-                if packet[3] == flag and packet[4] >= current_timestamp - time_treshold:
+                if flag in packet[3] and packet[4] >= current_timestamp - time_treshold:
                    count += 1
        return count
 
    def __getitem__(self, src_ip):
        """Return the list of packets associated with an IP address, for debugging"""
 
        return self.packets.get(src_ip, None)
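To see what the switch from a single flag string to a per-connection flag history changes, here is a small, purely illustrative session with the TCP helper as main.py uses it (addresses, ports and timestamps are made up):

import time
import tcp

tcp_packets = tcp.TCP(300)  # same retention window as in main.py
now = time.time()
tcp_packets.add_packet("10.0.0.2", 40512, "10.0.0.1", 22, "S", now)   # client SYN opens a new entry with ["S"]
tcp_packets.add_packet("10.0.0.1", 22, "10.0.0.2", 40512, "SA", now)  # server SYN ACK is appended to that entry's flag list
print(tcp_packets["10.0.0.2"])                      # e.g. [[40512, '10.0.0.1', 22, ['S', 'SA'], <timestamp>]]
print(tcp_packets.count_packet_of_type("SA", 180))  # counts entries whose flag history contains "SA" within 180 s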