Diffstat (limited to 'suricata-update/suricata/update/main.py')
-rw-r--r--  suricata-update/suricata/update/main.py  54
1 file changed, 34 insertions(+), 20 deletions(-)
diff --git a/suricata-update/suricata/update/main.py b/suricata-update/suricata/update/main.py
index 4a0e7a6..18af7a8 100644
--- a/suricata-update/suricata/update/main.py
+++ b/suricata-update/suricata/update/main.py
@@ -88,7 +88,7 @@ else:
logger = logging.getLogger()
# If Suricata is not found, default to this version.
-DEFAULT_SURICATA_VERSION = "4.0.0"
+DEFAULT_SURICATA_VERSION = "6.0.0"
# The default filename to use for the output rule file. This is a
# single file concatenating all input rule files together.
@@ -235,6 +235,8 @@ class Fetch:
# The file is not an archive, treat it as an individual file.
basename = os.path.basename(filename).split("-", 1)[1]
+ if not basename.endswith(".rules"):
+ basename = "{}.rules".format(basename)
files = {}
files[basename] = open(filename, "rb").read()
return files
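The hunk above makes sure a single downloaded file (one that is not an archive) is always stored under a ".rules" name. A minimal sketch of that renaming, using a hypothetical downloaded filename; the helper name is made up for illustration:

import os

def rule_filename(filename):
    # Drop the prefix the fetcher prepends to the cached filename
    # (everything up to the first "-") and force a ".rules" extension,
    # mirroring the hunk above.
    basename = os.path.basename(filename).split("-", 1)[1]
    if not basename.endswith(".rules"):
        basename = "{}.rules".format(basename)
    return basename

# Hypothetical example: a rule download whose name lacks an extension.
print(rule_filename("/var/tmp/3f2a9c-emerging-all"))  # -> emerging-all.rules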
@@ -435,8 +437,7 @@ def manage_classification(suriconf, files):
def handle_dataset_files(rule, dep_files):
if not rule.enabled:
return
-
- dataset_load = [el.strip() for el in rule.dataset.split(",") if el.startswith("load")]
+ dataset_load = [el for el in (el.strip() for el in rule.dataset.split(",")) if el.startswith("load")]
if not dataset_load:
# No dataset load found.
return
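The rewritten comprehension strips each dataset option before testing it, so a "load" option that follows a comma and a space (", load file.lst") is no longer missed by startswith(). A small sketch of the difference on a made-up dataset string:

dataset = "isset, ua-seen, type string, load ua-seen.lst"

# Old form: startswith() sees the unstripped element " load ua-seen.lst",
# so the leading space makes the test fail.
old = [el.strip() for el in dataset.split(",") if el.startswith("load")]

# New form: strip first, then filter.
new = [el for el in (el.strip() for el in dataset.split(",")) if el.startswith("load")]

print(old)  # []
print(new)  # ['load ua-seen.lst']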
@@ -446,7 +447,7 @@ def handle_dataset_files(rule, dep_files):
prefix = os.path.dirname(rule.group)
# Construct the source filename.
- source_filename = "{}/{}".format(prefix, dataset_filename)
+ source_filename = os.path.join(prefix, dataset_filename)
# If a source filename starts with a "/", look for it on the filesystem. The archive
# unpackers will take care of removing a leading / so this shouldn't happen for
@@ -464,9 +465,9 @@ def handle_dataset_files(rule, dep_files):
return
dataset_contents = dep_files[source_filename]
- content_hash = hashlib.md5(dataset_contents).hexdigest()
- new_rule = re.sub("(dataset.*?load\s+){}".format(dataset_filename), "\g<1>datasets/{}".format(content_hash), rule.format())
- dest_filename = os.path.join(config.get_output_dir(), "datasets", content_hash)
+ source_filename_hash = hashlib.md5(source_filename.encode()).hexdigest()
+ new_rule = re.sub(r"(dataset.*?load\s+){}".format(dataset_filename), r"\g<1>datasets/{}".format(source_filename_hash), rule.format())
+ dest_filename = os.path.join(config.get_output_dir(), "datasets", source_filename_hash)
dest_dir = os.path.dirname(dest_filename)
logger.debug("Copying dataset file {} to {}".format(dataset_filename, dest_filename))
try:
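Naming the copied dataset after an MD5 of its source path, rather than of its contents, keeps the rewritten "load" path stable across updates even when the dataset file itself changes. A rough illustration of the substitution, with a hypothetical rule string:

import hashlib, os, re

dataset_filename = "ua-seen.lst"
prefix = "rules"  # hypothetical directory of the rule's source file
source_filename = os.path.join(prefix, dataset_filename)

# Hash of the path, not of the file contents.
source_filename_hash = hashlib.md5(source_filename.encode()).hexdigest()

# Hypothetical rule text, for illustration only.
rule = 'alert http any any -> any any (msg:"test"; dataset:isset,ua, type string, load ua-seen.lst; sid:1;)'
new_rule = re.sub(r"(dataset.*?load\s+){}".format(dataset_filename),
                  r"\g<1>datasets/{}".format(source_filename_hash), rule)
# The rule now loads "datasets/<md5 of rules/ua-seen.lst>".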
@@ -482,10 +483,19 @@ def handle_filehash_files(rule, dep_files, fhash):
if not rule.enabled:
return
filehash_fname = rule.get(fhash)
- filename = [fname for fname, content in dep_files.items() if os.path.join(*(fname.split(os.path.sep)[1:])) == filehash_fname]
- if filename:
+
+ # Get the directory name the rule is from.
+ prefix = os.path.dirname(rule.group)
+
+ source_filename = os.path.join(prefix, filehash_fname)
+ dest_filename = source_filename[len(prefix) + len(os.path.sep):]
+ logger.debug("dest_filename={}".format(dest_filename))
+
+ if source_filename not in dep_files:
+ logger.error("{} file {} was not found".format(fhash, filehash_fname))
+ else:
logger.debug("Copying %s file %s to output directory" % (fhash, filehash_fname))
- filepath = os.path.join(config.get_state_dir(), os.path.dirname(filename[0]))
+ filepath = os.path.join(config.get_output_dir(), os.path.dirname(dest_filename))
logger.debug("filepath: %s" % filepath)
try:
os.makedirs(filepath)
@@ -493,11 +503,10 @@ def handle_filehash_files(rule, dep_files, fhash):
if oserr.errno != errno.EEXIST:
logger.error(oserr)
sys.exit(1)
- logger.debug("output fname: %s" % os.path.join(filepath, os.path.basename(filehash_fname)))
- with open(os.path.join(filepath, os.path.basename(filehash_fname)), "w+") as fp:
- fp.write(dep_files[os.path.join("rules", filehash_fname)].decode("utf-8"))
- else:
- logger.error("{} file {} was not found".format(fhash, filehash_fname))
+ output_filename = os.path.join(filepath, os.path.basename(filehash_fname))
+ logger.debug("output fname: %s" % output_filename)
+ with open(output_filename, "w") as fp:
+ fp.write(dep_files[source_filename].decode("utf-8"))
def write_merged(filename, rulemap, dep_files):
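The reworked handler resolves a filehash file relative to the directory of the rule file that references it (rule.group) and writes it under the output directory instead of the state directory. A sketch of the path arithmetic, with made-up names:

import os

# Hypothetical values for illustration.
rule_group = os.path.join("1a2b3c", "rules", "malware.rules")  # file the rule came from
filehash_fname = "md5-blacklist.txt"                           # value of the filemd5 keyword

prefix = os.path.dirname(rule_group)                    # "1a2b3c/rules"
source_filename = os.path.join(prefix, filehash_fname)  # key used to look up dep_files
dest_filename = source_filename[len(prefix) + len(os.path.sep):]  # "md5-blacklist.txt", relative to the output dir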
@@ -700,9 +709,9 @@ def resolve_flowbits(rulemap, disabled_rules):
class ThresholdProcessor:
patterns = [
- re.compile("\s+(re:\"(.*)\")"),
- re.compile("\s+(re:(.*?)),.*"),
- re.compile("\s+(re:(.*))"),
+ re.compile(r"\s+(re:\"(.*)\")"),
+ re.compile(r"\s+(re:(.*?)),.*"),
+ re.compile(r"\s+(re:(.*))"),
]
def extract_regex(self, buf):
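Switching the patterns to raw strings does not change what they match; it only stops Python from interpreting \s and similar sequences as string escapes, which recent Python 3 releases flag with a DeprecationWarning. For example:

import re

# Same regex either way, but only the raw-string form is warning-free
# on newer Python versions.
pattern = re.compile(r"\s+(re:\"(.*)\")")
m = pattern.search(' re:"trojan.*"')
print(m.group(2))  # trojan.*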
@@ -984,9 +993,14 @@ def load_sources(suricata_version):
# Now download each URL.
files = []
for url in urls:
+
+ # To de-duplicate filenames, add a prefix that is a hash of the URL.
+ prefix = hashlib.md5(url[0].encode()).hexdigest()
source_files = Fetch().run(url)
for key in source_files:
- files.append(SourceFile(key, source_files[key]))
+ content = source_files[key]
+ key = os.path.join(prefix, key)
+ files.append(SourceFile(key, content))
# Now load local rules.
if config.get("local") is not None:
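Because two sources may ship files with identical names (each may provide its own classification.config, for instance), every fetched file is now keyed under an MD5 prefix derived from the URL it came from. A minimal sketch of the keying, with made-up URLs:

import hashlib, os

# Hypothetical source URLs for illustration.
for url in ("https://rules.example.com/a.tar.gz", "https://rules.example.org/b.tar.gz"):
    prefix = hashlib.md5(url.encode()).hexdigest()
    # A "rules/app.rules" entry from each source now has a distinct key.
    print(os.path.join(prefix, "rules/app.rules"))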
@@ -1184,7 +1198,7 @@ def _main():
# Disable rules that are for app-layers that are not enabled.
if suriconf:
for key in suriconf.keys():
- m = re.match("app-layer\.protocols\.([^\.]+)\.enabled", key)
+ m = re.match(r"app-layer\.protocols\.([^\.]+)\.enabled", key)
if m:
proto = m.group(1)
if not suriconf.is_true(key, ["detection-only"]):
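As above, the raw-string prefix is only a literal-syntax fix; the pattern still extracts the protocol name from keys such as "app-layer.protocols.smb.enabled" so that rules for disabled app-layer protocols can be turned off. For example:

import re

key = "app-layer.protocols.smb.enabled"  # hypothetical config key
m = re.match(r"app-layer\.protocols\.([^\.]+)\.enabled", key)
if m:
    print(m.group(1))  # smb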