diff --git a/cznichaas2warden/README.md b/cznichaas2warden/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..42972f5bfb7dd9b44c547b6697a9087f63135e91
--- /dev/null
+++ b/cznichaas2warden/README.md
@@ -0,0 +1,14 @@
+# haas2warden
+
+Warden connector for data of the [CZ.NIC HaaS project](https://haas.nic.cz/).
+
+It downloads daily [HaaS data dumps](https://haas.nic.cz/stats/export/),
+converts them to IDEA messages and sends them to CESNET's Warden server.
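+Dump URLs have the form
+`https://haas.nic.cz/stats/export/YYYY/MM/YYYY-MM-DD.json.gz`.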
+
+It should be run from `cron` every night (at 3:30), when data from the
+previous day are available.
+
+The script just writes IDEA messages as files into a "filer" directory.
+A _warden_filer_ daemon must be configured to pick up the messages
+and send them to the Warden server.
+A systemd unit file which can be used to run the warden_filer is included.
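+
+For illustration, the layout of the filer directory the script maintains
+(a message is first written under `tmp`, then renamed into `incoming`, so
+the filer never sees partially written files):
+
+```
+warden_filer/
+├── tmp/        # messages are written here first
+└── incoming/   # warden_filer picks up complete messages from here
+```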
diff --git a/cznichaas2warden/haas2warden.cron b/cznichaas2warden/haas2warden.cron
new file mode 100644
index 0000000000000000000000000000000000000000..dd0dbb6857aca39ecd5e4bc9878f7668807792b7
--- /dev/null
+++ b/cznichaas2warden/haas2warden.cron
@@ -0,0 +1,2 @@
+# Run every day at 03:30
+30 03 * * * haas2warden python3 /data/haas2warden/haas2warden.py -p /data/haas2warden/warden_filer/ -n org.example.ext.cznic_haas -t >> /data/haas2warden/haas2warden.log 2>&1
diff --git a/cznichaas2warden/haas2warden.py b/cznichaas2warden/haas2warden.py
new file mode 100644
index 0000000000000000000000000000000000000000..130b82f90fa5f554a77220723515e700bad0a393
--- /dev/null
+++ b/cznichaas2warden/haas2warden.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+from gzip import decompress
+from json import loads
+from datetime import datetime, timedelta
+import argparse
+import logging
+import uuid
+import json
+import os
+import requests
+
+
+data_date = datetime.utcnow().date() - timedelta(days=1)
+
+LOGFORMAT = "%(asctime)-15s,%(name)s [%(levelname)s] %(message)s"
+LOGDATEFORMAT = "%Y-%m-%dT%H:%M:%S"
+logging.basicConfig(level=logging.INFO, format=LOGFORMAT, datefmt=LOGDATEFORMAT)
+
+logger = logging.getLogger('haas2warden')
+
+def createIDEAFile(idea_id, idea_msg):
+    """
+    Creates a file with the IDEA message in the .../tmp folder, then moves it to the .../incoming folder
+    """
+    tmp_dir_path = os.path.join(args.path, "tmp")
+    idea_file_path = os.path.join(tmp_dir_path, idea_id+".idea")
+    os.makedirs(tmp_dir_path, exist_ok=True)
+    with open(idea_file_path, "w") as idea_file:
+        idea_file.write(idea_msg)
+
+    incoming_dir_path = os.path.join(args.path, "incoming")
+    incoming_file_path = os.path.join(incoming_dir_path, idea_id+".idea")
+    os.makedirs(incoming_dir_path, exist_ok=True)
+    os.rename(idea_file_path, incoming_file_path)
+
+
+def createIDEA(time, time_closed, ip, login_successful, commands):
+    """
+    Creates an IDEA message and writes it to the filer directory
+    """
+    idea_id = str(uuid.uuid4())
+
+    if login_successful:
+        category = '["Intrusion.UserCompromise"]'
+        description = "SSH login on honeypot (HaaS)"
+        if args.test:
+            category = '["Intrusion.UserCompromise", "Test"]'
+        attach = f''',
+   "Attach": [
+        {{
+            "Note": "commands",
+            "Type": ["ShellCode"],
+            "ContentType": "application/json",
+            "Content": {json.dumps(commands)}
+        }}
+    ]''' # "commands" is already serialized into a JSON string; to embed it in a bigger JSON document it must be encoded again (escaping quotes and any other special characters)
+
+    else:
+        category = '["Attempt.Login"]'
+        description = "Unsuccessful SSH login attempt on honeypot (HaaS)"
+        if args.test:
+            category = '["Attempt.Login", "Test"]'
+        attach = ""
+
+    if time_closed:  # time_closed is sometimes empty; in that case we must omit CeaseTime from the IDEA message completely
+        cease_time = f'"CeaseTime": "{time_closed}",'
+    else:
+        cease_time = ""
+
+    idea_msg = f"""\
+{{
+    "Format": "IDEA0",
+    "ID": "{idea_id}",
+    "Category": {category},
+    "Description": "{description}",
+    "Note": "Extracted from data of CZ.NIC HaaS project",
+    "DetectTime": "{time}",
+    "EventTime": "{time}",
+    {cease_time}
+    "CreateTime": "{datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}",
+    "Source": [
+        {{
+            "IP4": ["{ip}"],
+            "Proto": ["tcp", "ssh"]
+        }}
+    ],
+    "Node": [
+        {{
+            "Name": "{args.name}",
+            "SW": ["CZ.NIC HaaS"],
+            "Type": ["Connection", "Auth", "Honeypot"],
+            "Note": "A script converting daily HaaS data dumps from https://haas.nic.cz/stats/export/"
+        }}
+    ]{attach}
+}}
+"""
+    createIDEAFile(idea_id, idea_msg)
+
+
+def processJSON():
+    """
+    Downloads a data dump from https://haas.nic.cz/stats/export/ and processes the JSON records.
+    """
+    date = datetime.strptime(args.date, '%Y-%m-%d').date()
+    # build the URL for the given date
+    url = "https://haas.nic.cz/stats/export/{:%Y/%m}/{}.json.gz".format(date, date)
+    # download the data
+    logger.info("Downloading {}".format(url))
+    response = requests.get(url)
+    if response.status_code == 200:
+        # unzip and parse the JSON file
+        json_objects = loads(decompress(response.content))
+        logger.info("Found {} records, converting to IDEA messages".format(len(json_objects)))
+        # go through all JSON objects
+        for json_object in json_objects:
+            createIDEA(json_object["time"], json_object["time_closed"], json_object["ip"], json_object["login_successful"], json.dumps(json_object["commands"]))
+    else:
+        logger.error("Download of {} failed with HTTP status {}".format(url, response.status_code))
+
+if __name__ == "__main__":
+
+    # parse arguments
+    parser = argparse.ArgumentParser(
+        prog="haas2warden.py",
+        description="A script converting daily HaaS data dumps from https://haas.nic.cz/stats/export/ to IDEA messages"
+    )
+
+    parser.add_argument('-d', '--date', metavar='DATE', default=str(data_date),
+                        help='date of the data dump to download, YYYY-MM-DD; data for a day are published the following day (default: utcnow - 1 day)')
+    parser.add_argument('-p', '--path', metavar='DIRPATH', default="/data/haas2warden/warden_filer/",
+                        help='target folder for IDEA messages (default: "/data/haas2warden/warden_filer/")')
+    parser.add_argument('-n', '--name', metavar='NODENAME', default="undefined",
+                        help='name of the detector node (default: undefined)')
+    parser.add_argument('-t', '--test', action="store_true",
+                        help='add Test category to generated messages')
+
+    args = parser.parse_args()
+
+    processJSON()
+    logger.info("Done")
diff --git a/cznichaas2warden/warden-filer.service b/cznichaas2warden/warden-filer.service
new file mode 100644
index 0000000000000000000000000000000000000000..ca81c2a95e6a093e998e8e1702b2c98233d73d79
--- /dev/null
+++ b/cznichaas2warden/warden-filer.service
@@ -0,0 +1,18 @@
+# Template of a systemd unit for the Warden filer daemon
+#
+# TODO: adjust paths, username and mode (receiver/sender) in the [Service]
+# section below. Then copy the file to:
+#   /etc/systemd/system/warden-filer.service
+# and run:
+#   systemctl daemon-reload
+#   systemctl start warden-filer.service
+
+[Unit]
+Description=Warden filer for haas2warden
+After=syslog.target network.target
+
+[Service]
+Type=forking
+User=haas2warden
+PIDFile=/data/haas2warden/warden_filer.pid
+ExecStart=/opt/warden_filer/warden_filer.py --daemon -c "/data/haas2warden/warden_filer.cfg" --pid_file "/data/haas2warden/warden_filer.pid" sender
+
diff --git a/cznichaas2warden/warden_filer.cfg b/cznichaas2warden/warden_filer.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..21b46cf97aa36563f3aeb1af878c6d6a5f8b597f
--- /dev/null
+++ b/cznichaas2warden/warden_filer.cfg
@@ -0,0 +1,21 @@
+{
+    // Warden config can be also referenced as:
+    // "warden": "/path/to/warden_client.cfg"
+    "warden": {
+        "url": "https://warden-hub.cesnet.cz/warden3",
+        "cafile": "/etc/pki/tls/certs/ca-bundle.crt",
+        "keyfile": "/data/haas2warden/key.pem",
+        "certfile": "/data/haas2warden/cert.pem",
+        "timeout": 10,
+        "errlog": {"level": "warning"},
+        "filelog": {"level": "info", "file": "/data/haas2warden/warden_filer.log"},
+        "idstore": "/data/haas2warden/warden_filer.id",
+        "name": "org.example.cznic_haas"
+    },
+    "sender": {
+        // Maildir like directory, whose "incoming" subdir will be checked
+        // for Idea events to send out
+        "dir": "/data/haas2warden/warden_filer",
+        "poll_time": 60
+    }
+}
diff --git a/fail2ban/README b/fail2ban/README
new file mode 100644
index 0000000000000000000000000000000000000000..76e202c76fc0018b81909cd268883983b362a5ac
--- /dev/null
+++ b/fail2ban/README
@@ -0,0 +1,45 @@
+Support scripts for fail2ban
+============================
+
+Introduction
+------------
+
+Fail2ban is a log file watcher which can run various actions when too
+many matching patterns occur in a log file.
+These helper shell scripts can be used as actions to report events to
+Warden_.
+
+Dependencies
+------------
+
+ 1. Python packages
+
+    warden_filer 3.0+
+
+Usage
+-----
+
+ * f2ban_spam.sh is meant to be used in cooperation with the default
+   "postfix" rule.
+ * f2ban_ssh.sh is meant to be used in cooperation with the default
+   "ssh" rule. 
+
+In the corresponding action, the following invocation can be used:
+
+   actionban = /usr/local/bin/f2ban_XXX.sh <ip> <failures> <time>
+
+Please edit the paths and Warden node names in the preamble of each
+script and check/edit the contents of the IDEA template (e.g. the Target
+IP address in f2ban_ssh.sh).
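+
+For illustration, a complete action definition might look like this (the
+file name /etc/fail2ban/action.d/warden-ssh.conf is only an example; the
+jail then references it with "action = warden-ssh"):
+
+   [Definition]
+   actionstart =
+   actionstop =
+   actioncheck =
+   actionban = /usr/local/bin/f2ban_ssh.sh <ip> <failures> <time>
+   actionunban =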
+
+The scripts write generated Idea_ events into a warden_filer compatible
+directory, so you will need to run a properly configured warden_filer
+instance (registered with the Warden server), which will take care of
+picking up the events and submitting them.
+
+.. _Warden: https://warden.cesnet.cz/
+.. _Idea: https://idea.cesnet.cz/
+
+------------------------------------------------------------------------------
+
+Copyright (C) 2017 Cesnet z.s.p.o
diff --git a/fail2ban/f2ban_spam.sh b/fail2ban/f2ban_spam.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ba426e69ab6bb8aff98d56f2943759cdab52fe8c
--- /dev/null
+++ b/fail2ban/f2ban_spam.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+umask 0111
+
+filer_dir="/var/mentat/spool/_wardenout"
+src_ip=$1
+failures=$2
+detect_time=$(date --date="@$3" --rfc-3339=seconds)
+create_time=$(date --rfc-3339=seconds)
+node_name="org.example.fail2ban.blacklist"
+
+# Generate a random (version 4) UUID from /dev/urandom bytes
+uuid() {
+        for ((n=0; n<16; n++)); do
+                read -n1 c < /dev/urandom
+                LC_CTYPE=C d=$(printf '%d' "'$c")
+                s=''
+                case $n in
+                        6) ((d = d & 79 | 64));;    # force the version nibble to 4
+                        8) ((d = d & 191 | 128));;  # variant bits 10xx (RFC 4122)
+                        3|5|7|9) s='-';;            # dashes after bytes 3, 5, 7, 9
+                esac
+                printf '%02x%s' $d "$s"
+        done
+}
+
+event_id=$(uuid)
+
+cat >"$filer_dir/tmp/$event_id" <<EOF
+{
+   "Format" : "IDEA0",
+   "ID" : "$event_id",
+   "DetectTime" : "$detect_time",
+   "CreateTime" : "$create_time",
+   "Category" : ["Abusive.Spam"],
+   "Description" : "Blacklisted host",
+   "Note" : "Block duration: 3600. IP was blacklisted, is listed on more than 5 public blacklists",
+   "Source" : [{
+      "Type": ["Spam"],
+      "IP4" : ["$src_ip"],
+      "Proto": ["tcp", "smtp"]
+   }],
+   "Node" : [{
+         "Name" : "$node_name",
+         "SW" : ["Fail2Ban"],
+         "Type" : ["Log", "Statistical"]
+   }],
+   "_CESNET" : {
+      "Impact" : "IP was blacklisted, is listed on more than 5 public blacklists",
+      "EventTemplate" : "f2b-001"
+   }
+}
+EOF
+
+mv "$filer_dir/tmp/$event_id" "$filer_dir/incoming"
diff --git a/fail2ban/f2ban_ssh.sh b/fail2ban/f2ban_ssh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e814a2e7e0f4bd9a9662faed820aca04615ebc29
--- /dev/null
+++ b/fail2ban/f2ban_ssh.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+umask 0111
+
+filer_dir="/var/spool/warden_sender"
+src_ip=$1
+failures=$2
+detect_time=$(date --date="@$3" --rfc-3339=seconds)
+create_time=$(date --rfc-3339=seconds)
+node_name="org.example.fail2ban.ssh"
+
+# Generate a random (version 4) UUID from /dev/urandom bytes
+uuid() {
+        for ((n=0; n<16; n++)); do
+                read -n1 c < /dev/urandom
+                LC_CTYPE=C d=$(printf '%d' "'$c")
+                s=''
+                case $n in
+                        6) ((d = d & 79 | 64));;    # force the version nibble to 4
+                        8) ((d = d & 191 | 128));;  # variant bits 10xx (RFC 4122)
+                        3|5|7|9) s='-';;            # dashes after bytes 3, 5, 7, 9
+                esac
+                printf '%02x%s' $d "$s"
+        done
+}
+
+event_id=$(uuid)
+
+cat >"$filer_dir/tmp/$event_id" <<EOF
+{
+   "Format": "IDEA0",
+   "ID": "$event_id",
+   "DetectTime": "$detect_time",
+   "CreateTime": "$create_time",
+   "Category": ["Attempt.Login"],
+   "Description": "SSH dictionary/bruteforce attack",
+   "ConnCount": $failures,
+   "Note": "IP attempted $failures logins to SSH service",
+   "Source": [{
+      "IP4": ["$src_ip"],
+      "Proto": ["tcp", "ssh"]
+   }],
+   "Target": [{
+       "Type": ["Anonymised"],
+       "IP4": ["192.0.2.0/24"],
+       "Anonymised": true,
+       "Proto": ["tcp", "ssh"],
+       "Port": [22]
+   }],
+   "Node": [{
+         "Name": "$node_name",
+         "SW": ["Fail2Ban"],
+         "Type": ["Log", "Statistical"]
+   }]
+}
+EOF
+
+mv "$filer_dir/tmp/$event_id" "$filer_dir/incoming"
diff --git a/hp-labrea/README b/hp-labrea/README
new file mode 100644
index 0000000000000000000000000000000000000000..6c8be9c12a6816b6f89cfe80d4d44b97f4424440
--- /dev/null
+++ b/hp-labrea/README
@@ -0,0 +1,67 @@
+Warden LaBrea connector 0.1 for Warden 3.X
+==========================================
+
+Introduction
+------------
+
+labrea-idea.py is a daemon meant for continuous watching of LaBrea log
+files and generation of corresponding security events in the Idea_ format.
+It is usually run together with the warden_filer daemon, which picks the
+resulting events up and feeds them to the Warden_ server. The connector
+supports sliding window aggregation, so sets of connections from the same
+source are reported as one event (within the aggregation window).
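+
+For example, these two log lines (same source within one aggregation
+window; addresses and ports are illustrative) would be reported as a
+single aggregated Idea event:
+
+        1493035442 Initial Connect - tarpitting: 89.163.242.15 56736 -> 195.113.254.182 9898
+        1493035460 Initial Connect - tarpitting: 89.163.242.15 56738 -> 195.113.254.183 9898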
+
+
+Dependencies
+------------
+
+ 1. Platform
+
+    Python 2.7+
+
+ 2. Python packages
+
+    warden_filer 3.0+ (recommended)
+
+
+Usage
+-----
+
+        ./labrea-idea.py [options] logfile ...
+
+        Options:
+          -h, --help            show this help message and exit
+          -w WINDOW, --window=WINDOW
+                                max detection window (default: 900)
+          -t TIMEOUT, --timeout=TIMEOUT
+                                detection timeout (default: 300)
+          -n NAME, --name=NAME  Warden client name
+          --test                Add Test category
+          -o, --oneshot         process files and quit (do not daemonize)
+          --poll=POLL           log file polling interval
+          -d DIR, --dir=DIR     Target directory (mandatory)
+          -p PID, --pid=PID     create PID file with this name (default:
+                                /var/run/labrea-idea.pid)
+          -u UID, --uid=UID     user id to run under
+          -g GID, --gid=GID     group id to run under
+          -v, --verbose         turn on debug logging
+          --log=LOG             syslog facility or log file name (default: local7)
+          --realtime            use system time along with log timestamps (default)
+          --norealtime          don't use system time, rely solely on log timestamps
+
+
+Configuration
+-------------
+
+The daemon is usually run by an init script (an example is part of the
+distribution, along with a sample logrotate definition). Options can then
+be configured in /etc/sysconfig/labrea-idea or /etc/default/labrea-idea,
+depending on your distribution's conventions, where at least the PARAMS
+variable has to be specified (for others, see the init script).
+
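+For illustration, a minimal sketch of such a file (the node name, target
+directory and watched log file are placeholders to adjust):
+
+        PARAMS="-n org.example.labrea -d /var/spool/warden_filer /var/log/labrea.log"
+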
+.. _Warden: https://warden.cesnet.cz/
+.. _Idea: https://idea.cesnet.cz/
+
+------------------------------------------------------------------------------
+
+Copyright (C) 2017 Cesnet z.s.p.o
diff --git a/hp-labrea/labrea-idea b/hp-labrea/labrea-idea
new file mode 100755
index 0000000000000000000000000000000000000000..b7fccef3c1434d2012952637bed6e3b090a4bff0
--- /dev/null
+++ b/hp-labrea/labrea-idea
@@ -0,0 +1,58 @@
+#!/bin/bash
+#
+### BEGIN INIT INFO
+# Provides:          labrea-idea
+# Required-Start:    $local_fs $syslog
+# Required-Stop:     $local_fs $syslog
+# Should-Start:      $network
+# Should-Stop:       $network
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Labrea-Idea aggregator/converter
+### END INIT INFO
+
+DAEMON_NAME=labrea-idea
+DAEMON_PATH=/usr/local/bin/"$DAEMON_NAME".py
+PID=/var/run/"$DAEMON_NAME".pid
+
+# Try Debian & Fedora/RHEL/Suse sysconfig
+for n in default sysconfig; do
+	[ -f /etc/$n/"$DAEMON_NAME" ] && . /etc/$n/"$DAEMON_NAME"
+done
+
+# Fallback
+function log_daemon_msg () { echo -n "$@"; }
+function log_end_msg () { [ $1 -eq 0 ] && echo " OK" || echo " Failed"; }
+function status_of_proc () { [ -f "$PID" ] && ps u -p $(<"$PID") || echo "$PID not found."; }
+
+[ -f /lib/lsb/init-functions ] && . /lib/lsb/init-functions
+
+ACTION="$1"
+
+case "$ACTION" in
+	start)
+		if [ -z "$PARAMS" ]; then
+			log_daemon_msg "Unconfigured $DAEMON_NAME, not starting."
+			exit 2
+		fi
+		mkdir -p "${PID%/*}"
+		log_daemon_msg "Starting $DAEMON_NAME"
+		start_daemon -p "$PID" "$DAEMON_PATH" --pid "$PID" $PARAMS
+		log_end_msg $?
+		;;
+	stop)
+		log_daemon_msg "Stopping $DAEMON_NAME"
+		killproc -p "$PID" "$DAEMON_PATH"
+		log_end_msg $?
+		;;
+	restart|force-reload)
+		$0 stop && sleep 2 && exec $0 start
+		;;
+	status)
+		status_of_proc -p "$PID" "$DAEMON_PATH"
+		;;
+	*)
+		echo "Usage: $0 {start|stop|restart|force-reload|status}"
+		exit 2
+		;;
+esac
diff --git a/hp-labrea/labrea-idea.logrotate b/hp-labrea/labrea-idea.logrotate
new file mode 100644
index 0000000000000000000000000000000000000000..6665ccbe588bf15903860e7dba0b4868bc2d8691
--- /dev/null
+++ b/hp-labrea/labrea-idea.logrotate
@@ -0,0 +1,11 @@
+/var/log/labrea-idea.log
+{
+	rotate 52
+	weekly
+	missingok
+	notifempty
+	compress
+	delaycompress
+	dateext
+	create 640 mentat mentat
+}
diff --git a/hp-labrea/labrea-idea.py b/hp-labrea/labrea-idea.py
new file mode 100755
index 0000000000000000000000000000000000000000..93fb9da9f47d9b241644ab839869c70d1724e5a3
--- /dev/null
+++ b/hp-labrea/labrea-idea.py
@@ -0,0 +1,678 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+import errno
+import re
+import time
+import optparse
+import signal
+import contextlib
+import uuid
+import json
+import socket
+import resource
+import atexit
+import logging
+import logging.handlers
+import os.path as pth
+from itertools import izip
+from collections import namedtuple
+try:
+    from collections import OrderedDict
+except ImportError:
+    from ordereddict import OrderedDict
+
+
+class WindowContextMgr(object):
+
+    def __init__(self, window=60*10, timeout=60*5, ideagen=None):
+        self.contexts = {}
+        self.update_timestamp = 0
+        self.window = window
+        self.timeout = timeout
+        self.ideagen = ideagen
+        self.first_update_queue = OrderedDict()
+        self.last_update_queue = OrderedDict()
+        # Hammer to mitigate too big events
+        self.max_count = 2000
+        self.max_src_ports = 1024
+
+    def expire_queue(self, queue, window):
+        aggr_events = []
+        ctx_to_del = []
+        for ctx, timestamp in queue.iteritems():
+            if timestamp >= self.update_timestamp - window:
+                break
+            closed = self.ctx_close(self.contexts[ctx])
+            if closed is not None:
+                aggr_events.append(closed)
+            ctx_to_del.append(ctx)
+        for ctx in ctx_to_del:
+            del self.contexts[ctx]
+            del self.first_update_queue[ctx]
+            del self.last_update_queue[ctx]
+        return aggr_events
+
+    def process(self, event=None, timestamp=None):
+        if timestamp > self.update_timestamp:
+            self.update_timestamp = timestamp
+
+        aggr_events = []
+
+        aggr_events.extend(self.expire_queue(self.first_update_queue, self.window))
+        aggr_events.extend(self.expire_queue(self.last_update_queue, self.timeout))
+
+        if event is not None:
+            ctx = self.match(event)
+            if ctx is not None:
+                if ctx not in self.contexts:
+                    self.contexts[ctx] = self.ctx_create(event)
+                    self.first_update_queue[ctx] = self.update_timestamp
+                    self.last_update_queue[ctx] = self.update_timestamp
+                else:
+                    if not self.ctx_append(self.contexts[ctx], event):
+                        closed = self.ctx_close(self.contexts[ctx])
+                        if closed is not None:
+                            aggr_events.append(closed)
+                        del self.contexts[ctx]
+                        del self.first_update_queue[ctx]
+                        del self.last_update_queue[ctx]
+                    else:
+                        del self.last_update_queue[ctx]
+                        self.last_update_queue[ctx] = self.update_timestamp
+
+        return aggr_events
+
+    def close(self, timestamp):
+        if timestamp is not None and timestamp > self.update_timestamp:
+            self.update_timestamp = timestamp
+        aggr_events = []
+        for context in self.contexts.values():
+            closed = self.ctx_close(context)
+            if closed is not None:
+                aggr_events.append(closed)
+        self.contexts = {}
+        self.first_update_queue = OrderedDict()
+        self.last_update_queue = OrderedDict()
+        return aggr_events
+
+
+class PingContextMgr(WindowContextMgr):
+
+    def match(self, event):
+        return event.src_ip if 'Ping' in event.message else None
+
+    def ctx_create(self, event):
+        ctx = dict(
+            src_ip=event.src_ip,
+            tgt_ips=set([event.tgt_ip]),
+            count=1,
+            first_update=self.update_timestamp,
+            last_update=self.update_timestamp
+        )
+        return ctx
+
+    def ctx_append(self, ctx, event):
+        ctx["tgt_ips"].add(event.tgt_ip)
+        ctx["count"] += 1
+        ctx["last_update"] = self.update_timestamp
+        return ctx["count"] < self.max_count
+
+    def ctx_close(self, ctx):
+        return self.ideagen.gen_idea(
+            src=ctx["src_ip"],
+            src_ports=None,
+            targets=[(ip, []) for ip in ctx["tgt_ips"]],
+            detect_time=self.update_timestamp,
+            event_time=ctx["first_update"],
+            cease_time=ctx["last_update"],
+            count=ctx["count"],
+            template="ping"
+        )
+
+
+class ConnectContextMgr(WindowContextMgr):
+
+    def match(self, event):
+        return event.src_ip if 'Connect' in event.message else None
+
+    def ctx_create(self, event):
+        ctx = dict(
+            src_ip=event.src_ip,
+            src_ports=set([event.src_port]),
+            tgt_ips_ports={event.tgt_ip: set([event.tgt_port])},
+            count=1,
+            first_update=self.update_timestamp,
+            last_update=self.update_timestamp
+        )
+        return ctx
+
+    def ctx_append(self, ctx, event):
+        tgt_ports = ctx["tgt_ips_ports"].setdefault(event.tgt_ip, set())
+        tgt_ports.add(event.tgt_port)
+        ctx["src_ports"].add(event.src_port)
+        ctx["count"] += 1
+        ctx["last_update"] = self.update_timestamp
+        return ctx["count"] < self.max_count
+
+    def ctx_close(self, ctx):
+        src_ports = ctx["src_ports"] if len(ctx["src_ports"]) <= self.max_src_ports else None
+        return self.ideagen.gen_idea(
+            src=ctx["src_ip"],
+            src_ports=src_ports,
+            targets=ctx["tgt_ips_ports"].items(),
+            detect_time=self.update_timestamp,
+            event_time=ctx["first_update"],
+            cease_time=ctx["last_update"],
+            count=ctx["count"],
+            template="connect"
+        )
+
+
+class InboundContextMgr(ConnectContextMgr):
+
+    def match(self, event):
+        return event.src_ip if 'Inbound' in event.message else None
+
+    def ctx_close(self, ctx):
+        return self.ideagen.gen_idea(
+            src=ctx["src_ip"],
+            src_ports=ctx["src_ports"],
+            targets=ctx["tgt_ips_ports"].items(),
+            detect_time=self.update_timestamp,
+            event_time=ctx["first_update"],
+            cease_time=ctx["last_update"],
+            count=ctx["count"],
+            template="synack"
+        )
+
+
+class FileWatcher(object):
+
+    def __init__(self, filename, tail=True):
+        self.filename = filename
+        self.open()
+        self.line_buffer = ""
+        if tail and self.f:
+            self.f.seek(0, os.SEEK_END)
+
+    def open(self):
+        try:
+            self.f = open(self.filename, "r")
+            st = os.fstat(self.f.fileno())
+            self.inode, self.size = st.st_ino, st.st_size
+        except IOError:
+            self.f = None
+            self.inode = -1
+            self.size = -1
+
+    def _check_reopen(self):
+        try:
+            st = os.stat(self.filename)
+            cur_inode, cur_size = st.st_ino, st.st_size
+        except OSError as e:
+            cur_inode = -1
+            cur_size = -1
+        if cur_inode != self.inode or cur_size < self.size:
+            self.close()
+            self.open()
+
+    def readline(self):
+        if not self.f:
+            self.open()
+            if not self.f:
+                return self.line_buffer
+        res = self.f.readline()
+        if not res:
+            self._check_reopen()
+            if not self.f:
+                return self.line_buffer
+            res = self.f.readline()
+        if not res.endswith("\n"):
+            # incomplete line: buffer it and return None until the rest arrives
+            self.line_buffer += res
+            return None
+        res = self.line_buffer + res
+        self.line_buffer = ""
+        return res
+
+    def close(self):
+        try:
+            if self.f:
+                self.f.close()
+        except IOError:
+            pass
+        self.inode = -1
+        self.size = -1
+
+    def __repr__(self):
+        return '%s("%s")' % (type(self).__name__, self.filename)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+        return False
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        return self.readline()
+
+
+class IdeaGen(object):
+
+    def __init__(self, name, test=False):
+        self.name = name
+        self.test = test
+        self.template = {
+            "connect": {
+                "category": ["Recon.Scanning"],
+                "description": "TCP connections/scan",
+                "template": "labrea-001",
+                "note": "Connections from remote host to never assigned IP",
+                "proto": ["tcp"]
+            },
+            "ping": {
+                "category": ["Recon.Scanning"],
+                "description": "Ping scan",
+                "template": "labrea-002",
+                "note": "Ping requests from remote host to never assigned IP",
+                "proto": ["icmp"]
+            },
+            "synack": {
+                "category": ["Availability.DoS"],
+                "description": "Unsolicited TCP SYN/ACK connections/scan",
+                "template": "labrea-003",
+                "note": "Unsolicited SYN/ACK packet received from remote host to never assigned IP",
+                "source_type": ["Backscatter"],
+                "source_to_target": True,
+                "proto": ["tcp"]
+            }
+        }
+
+    def format_timestamp(self, epoch):
+        return "%04d-%02d-%02dT%02d:%02d:%02dZ" % time.gmtime(epoch)[:6]
+
+    def gen_idea(self, src, src_ports, targets, detect_time, event_time, cease_time, count, template):
+        tmpl = self.template[template]
+        isource = {
+            "IP6" if ":" in src else "IP4": [src],
+            "Proto": tmpl["proto"]
+        }
+        if "source_type" in tmpl:
+            isource["Type"] = tmpl["source_type"]
+        if src_ports:
+            isource["Port"] = [int(port) for port in src_ports]
+        # Fold multiple IPs with the same portset
+        folded_tgt = {}
+        for tgt, ports in targets:
+            folded_tgt.setdefault(frozenset(ports), []).append(tgt)
+        itargets = []
+        for ports, tgt in folded_tgt.items():
+            itarget = {"Proto": tmpl["proto"]}
+            tgts4 = [ip for ip in tgt if ":" not in ip]
+            tgts6 = [ip for ip in tgt if ":" in ip]
+            if tgts4:
+                itarget["IP4"] = tgts4
+            if tgts6:
+                itarget["IP6"] = tgts6
+            if ports:
+                itarget["Port"] = [int(port) for port in ports]
+            itargets.append(itarget)
+        inode = {
+            "SW": ["LaBrea"],
+            "Type": ["Connection", "Tarpit"],
+            "Name": self.name
+        }
+        idea = {
+            "Format": "IDEA0",
+            "ID": str(uuid.uuid4()),
+            "Category": tmpl["category"] + (["Test"] if self.test else []),
+            "Description": tmpl["description"],
+            "DetectTime": self.format_timestamp(detect_time),
+            "EventTime": self.format_timestamp(event_time),
+            "CeaseTime": self.format_timestamp(cease_time),
+            "ConnCount": count,
+            "Note": tmpl["note"],
+            "_CESNET": {
+                "EventTemplate": tmpl["template"],
+            },
+            "Target": itargets,
+            "Node": [inode]
+        }
+        if tmpl.get("source_to_target", False):
+            idea["Target"].append(isource)
+        else:
+            idea["Source"] = [isource]
+        return idea
+
+
+class Filer(object):
+
+    def __init__(self, directory):
+        self.basedir = self._ensure_path(directory)
+        self.tmp = self._ensure_path(pth.join(self.basedir, "tmp"))
+        self.incoming = self._ensure_path(pth.join(self.basedir, "incoming"))
+        self.hostname = socket.gethostname()
+        self.pid = os.getpid()
+
+    def _ensure_path(self, p):
+        try:
+            os.mkdir(p)
+        except OSError:
+            if not pth.isdir(p):
+                raise
+        return p
+
+    def _get_new_name(self, fd=None):
+        (inode, device) = os.fstat(fd)[1:3] if fd else (0, 0)
+        return "%s.%d.%f.%d.%d" % (
+            self.hostname, self.pid, time.time(), device, inode)
+
+    def create_unique_file(self):
+        # First find and open name unique within tmp
+        tmpname = None
+        while not tmpname:
+            tmpname = self._get_new_name()
+            try:
+                fd = os.open(pth.join(self.tmp, tmpname), os.O_CREAT | os.O_RDWR | os.O_EXCL)
+            except OSError as e:
+                if e.errno != errno.EEXIST:
+                    raise   # other errors than duplicates should get noticed
+                tmpname = None
+        # Now we know the device/inode, rename to make unique within system
+        newname = self._get_new_name(fd)
+        os.rename(pth.join(self.tmp, tmpname), pth.join(self.tmp, newname))
+        nf = os.fdopen(fd, "w")
+        return nf, newname
+
+    def publish_file(self, short_name):
+        os.rename(pth.join(self.tmp, short_name), pth.join(self.incoming, short_name))
+
+
+def daemonize(
+        work_dir=None, chroot_dir=None,
+        umask=None, uid=None, gid=None,
+        pidfile=None, files_preserve=[], signals={}):
+    # Dirs, limits, users
+    if chroot_dir is not None:
+        os.chdir(chroot_dir)
+        os.chroot(chroot_dir)
+    if umask is not None:
+        os.umask(umask)
+    if work_dir is not None:
+        os.chdir(work_dir)
+    if gid is not None:
+        os.setgid(gid)
+    if uid is not None:
+        os.setuid(uid)
+    # Doublefork, split session
+    if os.fork() > 0:
+        os._exit(0)
+    try:
+        os.setsid()
+    except OSError:
+        pass
+    if os.fork() > 0:
+        os._exit(0)
+    # Setup signal handlers
+    for (signum, handler) in signals.items():
+        signal.signal(signum, handler)
+    # Close descriptors
+    descr_preserve = set(f.fileno() for f in files_preserve)
+    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
+    if maxfd == resource.RLIM_INFINITY:
+        maxfd = 65535
+    for fd in range(maxfd, 3, -1):  # 3 means omit stdin, stdout, stderr
+        if fd not in descr_preserve:
+            try:
+                os.close(fd)
+            except Exception:
+                pass
+    # PID file
+    if pidfile is not None:
+        pidd = os.open(pidfile, os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_TRUNC)
+        os.write(pidd, str(os.getpid())+"\n")
+        os.close(pidd)
+        # Define and setup atexit closure
+
+        @atexit.register
+        def unlink_pid():
+            try:
+                os.unlink(pidfile)
+            except Exception:
+                pass
+    # Redirect stdin, stdout, stderr to /dev/null
+    devnull = os.open(os.devnull, os.O_RDWR)
+    for fd in range(3):
+        os.dup2(devnull, fd)
+
+
+def save_events(aggr, filer):
+    for event in aggr:
+        f, name = filer.create_unique_file()
+        with f:
+            f.write(json.dumps(event, ensure_ascii=True))
+        filer.publish_file(name)
+        logging.info(
+            "Saved %s %s \"%s\" (%d) as file %s" % (
+                event["ID"], "+".join(event["Category"]), event["Description"], event["ConnCount"], name))
+
+
+RE_LIST = (
+    # 1493035442 Initial Connect - tarpitting: 89.163.242.15 56736 -> 195.113.254.182 9898
+    # 1493037991 Inbound SYN/ACK: 185.62.190.15 21001 -> 195.113.252.222 15584
+    (
+        re.compile(r'([0-9]+) ([^:]*:) ([^ ]+) ([0-9]+) -> ([^ ]+) ([0-9]+).*'),
+        namedtuple("connect_tuple", ("timestamp", "message", "src_ip", "src_port", "tgt_ip", "tgt_port"))
+    ),
+    # 1493035442 Responded to a Ping: 88.86.96.25 -> 195.113.253.87 *
+    (
+        re.compile(r'([0-9]+) ([^:]*:) ([^ ]+) -> ([^ ]+).*'),
+        namedtuple("ping_tuple", ("timestamp", "message", "src_ip", "tgt_ip"))
+    )
+)
+
+
+def match_event(line):
+    for labrea_re, event_tuple in RE_LIST:
+        match = labrea_re.match(line)
+        if match:
+            return event_tuple(*match.groups())
+    logging.info("Unmatched line: \"%s\"" % line.replace("\n", r"\n"))
+    return None
+
+
+def get_args():
+    optp = optparse.OptionParser(
+        usage="usage: %prog [options] logfile ...",
+        description="Watch LaBrea logfiles and generate Idea events into directory")
+    optp.add_option(
+        "-w", "--window",
+        default=900,
+        dest="window",
+        type="int",
+        action="store",
+        help="max detection window (default: %default)")
+    optp.add_option(
+        "-t", "--timeout",
+        default=300,
+        dest="timeout",
+        type="int",
+        action="store",
+        help="detection timeout (default: %default)")
+    optp.add_option(
+        "-n", "--name",
+        default=None,
+        dest="name",
+        type="string",
+        action="store",
+        help="Warden client name")
+    optp.add_option(
+        "--test",
+        default=False,
+        dest="test",
+        action="store_true",
+        help="Add Test category")
+    optp.add_option(
+        "-o", "--oneshot",
+        default=False,
+        dest="oneshot",
+        action="store_true",
+        help="process files and quit (do not daemonize)")
+    optp.add_option(
+        "--poll",
+        default=1,
+        dest="poll",
+        type="int",
+        action="store",
+        help="log file polling interval")
+    optp.add_option(
+        "-d", "--dir",
+        default=None,
+        dest="dir",
+        type="string",
+        action="store",
+        help="Target directory (mandatory)")
+    optp.add_option(
+        "-p", "--pid",
+        default=pth.join("/var/run", pth.splitext(pth.basename(sys.argv[0]))[0] + ".pid"),
+        dest="pid",
+        type="string",
+        action="store",
+        help="create PID file with this name (default: %default)")
+    optp.add_option(
+        "-u", "--uid",
+        default=None,
+        dest="uid",
+        type="int",
+        action="store",
+        help="user id to run under")
+    optp.add_option(
+        "-g", "--gid",
+        default=None,
+        dest="gid",
+        type="int",
+        action="store",
+        help="group id to run under")
+    optp.add_option(
+        "-v", "--verbose",
+        dest="verbose",
+        action="store_true",
+        help="turn on debug logging")
+    optp.add_option(
+        "--log",
+        default="local7",
+        dest="log",
+        type="string",
+        action="store",
+        help="syslog facility or log file name (default: %default)")
+    optp.add_option(
+        "--realtime",
+        default=True,
+        dest="realtime",
+        action="store_true",
+        help="use system time along with log timestamps (default)")
+    optp.add_option(
+        "--norealtime",
+        dest="realtime",
+        action="store_false",
+        help="don't system time, use solely log timestamps")
+    return optp
+
+
+running_flag = True
+reload_flag = False
+
+
+def terminate_me(signum, frame):
+    global running_flag
+    running_flag = False
+
+
+def reload_me(signum, frame):
+    global reload_flag
+    reload_flag = True
+
+
+def main():
+    global reload_flag
+    optp = get_args()
+    opts, args = optp.parse_args()
+    if not args or opts.name is None or opts.dir is None:
+        optp.print_help()
+        sys.exit()
+
+    logformat = "%(filename)s[%(process)d] %(message)s"
+    logger = logging.getLogger()
+    if opts.oneshot:
+        handler = logging.StreamHandler()
+        handler.setFormatter(logging.Formatter("%(asctime)s " + logformat))
+    else:
+        if "/" in opts.log:
+            handler = logging.handlers.WatchedFileHandler(opts.log)
+            handler.setFormatter(logging.Formatter("%(asctime)s " + logformat))
+        else:
+            handler = logging.handlers.SysLogHandler(address="/dev/log", facility=opts.log)
+            handler.setFormatter(logging.Formatter(logformat))
+    logger.addHandler(handler)
+    logger.setLevel(logging.DEBUG if opts.verbose else logging.INFO)
+
+    try:
+        if opts.oneshot:
+            signal.signal(signal.SIGINT, terminate_me)
+            signal.signal(signal.SIGTERM, terminate_me)
+            files = [open(arg) for arg in args]
+        else:
+            daemonize(
+                pidfile=opts.pid,
+                uid=opts.uid,
+                gid=opts.gid,
+                signals={
+                    signal.SIGINT: terminate_me,
+                    signal.SIGTERM: terminate_me,
+                    signal.SIGHUP: reload_me
+                })
+            files = [FileWatcher(arg) for arg in args]
+
+        ideagen = IdeaGen(name=opts.name, test=opts.test)
+        filer = Filer(opts.dir)
+        contexts = [
+            context(window=opts.window, timeout=opts.timeout, ideagen=ideagen)
+            for context in [PingContextMgr, ConnectContextMgr, InboundContextMgr]
+        ]
+
+        with contextlib.nested(*files):
+            for line_set in izip(*files):
+                for line in line_set:
+                    if not line:
+                        break
+
+                    event = match_event(line)
+
+                    if event or opts.realtime:
+                        timestamp = int(event.timestamp) if event else int(time.time())
+                        for context in contexts:
+                            save_events(context.process(event, timestamp), filer)
+
+                if not running_flag:
+                    break
+                if reload_flag:
+                    for f in files:
+                        f.close()
+                        f.open()
+                    reload_flag = False
+                if not any(line_set):
+                    time.sleep(opts.poll)
+
+        for context in contexts:
+            timestamp = int(time.time()) if opts.realtime else None
+            save_events(context.close(timestamp), filer)
+
+    except Exception:
+        logging.exception("Exception")
+
+if __name__ == "__main__":
+    main()