Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: Pavel.Valach/warden

Showing 2795 additions and 1994 deletions
/*
*
* -*- coding: utf-8 -*-
*
* warden-map.css
*
* Copyright (C) 2016 Cesnet z.s.p.o
* Use of this source is governed by a 3-clause BSD-style license, see LICENSE file.
*
*/
body {
font-family: 'Oswald', sans-serif;
background: #00253D;
border: 0px;
padding: 0px;
margin: 0px;
}
h2 {
color: #0062a2;
}
.hoverinfo {
font-family: 'Oswald', sans-serif;
}
#country {
color: #0062a2; /* Cesnet blue */
font-weight: bold;
}
table {
text-align: left;
margin: 0;
padding: 0;
font-size: 12px;
}
table th {
color: #0062a2; /* Cesnet blue */
padding: 0;
}
table td {
color: #4b4d4a; /* Greenish gray */
padding: 0;
}
#container {
overflow: hidden;
/* border: 2px solid #0062a2;
border: 0px;
padding: 0px;
margin: 0px;
border-radius: 5px;*/
position: relative;
/* width: 1280px;
height: 720px;*/
max-width: 100%;
max-height: 100%;
width: 100%;
height: 100vh;
}
.zoom-button {
width: 40px;
height: 40px;
border-radius: 5px;
border: none;
background: #dcdcda;
font-size: 23px;
font-weight: bold;
color: white;
cursor: pointer;
}
.zoom-button:hover {
background-color: #0062a2;
}
#zoom-info {
display: inline-block;
padding: 10px;
color: #0062a2;
}
#warden-logo {
position: absolute;
top: 30px;
left: 30px;
background: white;
padding: 10px;
border-radius: 10px;
width: 240px;
height: 92px;
text-align: center;
}
#cesnet-logo {
position: absolute;
top: 30px;
right: 30px;
background: white;
padding: 10px;
border-radius: 10px;
width: 240px;
height: 92px;
text-align: center;
}
#legend-box {
position: absolute;
bottom: 30px;
left: 30px;
background-color: rgba(0,0,0,0.3);
color: white;
padding: 10px;
border-radius: 10px;
/*width: 240px;
height: 92px;
text-align: center;*/
}
#heading {
position: absolute;
top: 30px;
left: 50%;
width: 40em;
height: 92px;
margin-left: -20em;
font-size: xx-large;
color: white;
text-align: center;
vertical-align: middle;
line-height: 92px;
}
/*
*
* -*- coding: utf-8 -*-
*
* warden-map.js
*
* Copyright (C) 2016 Cesnet z.s.p.o
* Use of this source is governed by a 3-clause BSD-style license, see LICENSE file.
*
*/
// NOTE: Change the path in the d3.json() call if you separate backend and frontend!
// Zooming functionality is based on WunderBart's implementation.
// Please see the following links:
// https://github.com/wunderbart
// https://jsfiddle.net/wunderbart/Lom3b0gb/
function Zoom(args) {
$.extend(this, {
$buttons: $(".zoom-button"),
$info: $("#zoom-info"),
scale: { max: 50, currentShift: 0 },
$container: args.$container,
datamap: args.datamap
});
this.init();
}
Zoom.prototype.init = function() {
var paths = this.datamap.svg.selectAll("path"),
subunits = this.datamap.svg.selectAll(".datamaps-subunit");
// preserve stroke thickness
paths.style("vector-effect", "non-scaling-stroke");
// disable click on drag end
subunits.call(
d3.behavior.drag().on("dragend", function() {
d3.event.sourceEvent.stopPropagation();
})
);
this.scale.set = this._getScalesArray();
this.d3Zoom = d3.behavior.zoom().scaleExtent([ 1, this.scale.max ]);
this._displayPercentage(1);
this.listen();
};
Zoom.prototype.listen = function() {
this.$buttons.off("click").on("click", this._handleClick.bind(this));
this.datamap.svg
.call(this.d3Zoom.on("zoom", this._handleScroll.bind(this)))
.on("dblclick.zoom", null); // disable zoom on double-click
};
Zoom.prototype.reset = function() {
this._shift("reset");
};
Zoom.prototype._handleScroll = function() {
var translate = d3.event.translate,
scale = d3.event.scale,
limited = this._bound(translate, scale);
this.scrolled = true;
this._update(limited.translate, limited.scale);
};
Zoom.prototype._handleClick = function(event) {
var direction = $(event.target).data("zoom");
this._shift(direction);
};
Zoom.prototype._shift = function(direction) {
var center = [ this.$container.width() / 2, this.$container.height() / 2 ],
translate = this.d3Zoom.translate(), translate0 = [], l = [],
view = {
x: translate[0],
y: translate[1],
k: this.d3Zoom.scale()
}, bounded;
translate0 = [
(center[0] - view.x) / view.k,
(center[1] - view.y) / view.k
];
if (direction == "reset") {
view.k = 1;
this.scrolled = true;
} else {
view.k = this._getNextScale(direction);
}
l = [ translate0[0] * view.k + view.x, translate0[1] * view.k + view.y ];
view.x += center[0] - l[0];
view.y += center[1] - l[1];
bounded = this._bound([ view.x, view.y ], view.k);
this._animate(bounded.translate, bounded.scale);
};
Zoom.prototype._bound = function(translate, scale) {
var width = this.$container.width(),
height = this.$container.height();
translate[0] = Math.min(
(width / height) * (scale - 1),
Math.max( width * (1 - scale), translate[0] )
);
translate[1] = Math.min(0, Math.max(height * (1 - scale), translate[1]));
return { translate: translate, scale: scale };
};
Zoom.prototype._update = function(translate, scale) {
this.d3Zoom
.translate(translate)
.scale(scale);
this.datamap.svg.selectAll("g")
.attr("transform", "translate(" + translate + ")scale(" + scale + ")");
this._displayPercentage(scale);
};
Zoom.prototype._animate = function(translate, scale) {
var _this = this,
d3Zoom = this.d3Zoom;
d3.transition().duration(350).tween("zoom", function() {
var iTranslate = d3.interpolate(d3Zoom.translate(), translate),
iScale = d3.interpolate(d3Zoom.scale(), scale);
return function(t) {
_this._update(iTranslate(t), iScale(t));
};
});
};
Zoom.prototype._displayPercentage = function(scale) {
var value;
value = Math.round(Math.log(scale) / Math.log(this.scale.max) * 100);
this.$info.text(value + "%");
};
Zoom.prototype._getScalesArray = function() {
var array = [],
scaleMaxLog = Math.log(this.scale.max);
for (var i = 0; i <= 10; i++) {
array.push(Math.pow(Math.E, 0.1 * i * scaleMaxLog));
}
return array;
};
Zoom.prototype._getNextScale = function(direction) {
var scaleSet = this.scale.set,
currentScale = this.d3Zoom.scale(),
lastShift = scaleSet.length - 1,
shift, temp = [];
if (this.scrolled) {
for (shift = 0; shift <= lastShift; shift++) {
temp.push(Math.abs(scaleSet[shift] - currentScale));
}
shift = temp.indexOf(Math.min.apply(null, temp));
if (currentScale >= scaleSet[shift] && shift < lastShift) {
shift++;
}
if (direction == "out" && shift > 0) {
shift--;
}
this.scrolled = false;
} else {
shift = this.scale.currentShift;
if (direction == "out") {
shift > 0 && shift--;
} else {
shift < lastShift && shift++;
}
}
this.scale.currentShift = shift;
return scaleSet[shift];
};
function defaults(obj) {
Array.prototype.slice.call(arguments, 1).forEach(function(source) {
if (source) {
for (var prop in source) {
// Deep copy if property not set
if (obj[prop] == null) {
if (typeof source[prop] == 'function') {
obj[prop] = source[prop];
}
else {
obj[prop] = JSON.parse(JSON.stringify(source[prop]));
}
}
}
}
});
return obj;
}
function val( datumValue, optionsValue, context ) {
if ( typeof context === 'undefined' ) {
context = optionsValue;
optionsValue = undefined;
}
var value = typeof datumValue !== 'undefined' ? datumValue : optionsValue;
if (typeof value === 'undefined') {
return null;
}
if ( typeof value === 'function' ) {
var fnContext = [context];
if ( context.geography ) {
fnContext = [context.geography, context.data];
}
return value.apply(null, fnContext);
}
else {
return value;
}
}
var cat_color = {
"Abusive": "MediumPurple",
"Malware": "Red",
"Recon": "LightSlateGray",
"Attempt": "GhostWhite",
"Intrusion": "DarkTurquoise",
"Availability": "HotPink",
"Information": "PaleTurquoise",
"Fraud": "Yellow",
"Vulnerable": "DarkGoldenRod",
"Anomaly": "Brown",
"Other": "Green"
}
var cat_desc = {
"Abusive": "spam",
"Malware": "virus, worm, trojan, malware",
"Recon": "scanning, sniffing",
"Attempt": "bruteforce, exploitation attempt",
"Intrusion": "botnet, successful exploit",
"Availability": "(D)DOS",
"Information": "wiretapping, spoofing, hijacking",
"Fraud": "phishing, scam",
"Vulnerable": "open for abuse",
"Anomaly": "unusual traffic",
"Other": "unknown/unidentified"
}
function handleArcs (layer, data, options) {
var self = this,
svg = this.svg;
if ( !data || (data && !data.slice) ) {
throw "Datamaps Error - arcs must be an array";
}
// For some reason arc options were put in an `options` object instead of the parent arc
// I don't like this, so to match bubbles and other plugins I'm moving it
// This is to keep backwards compatibility
for ( var i = 0; i < data.length; i++ ) {
data[i] = defaults(data[i], data[i].options);
delete data[i].options;
}
if ( typeof options === "undefined" ) {
options = defaultOptions.arcConfig;
}
var arcs = layer.selectAll('path.datamaps-arc').data( data, JSON.stringify );
var path = d3.geo.path()
.projection(self.projection);
arcs
.enter()
.append('svg:path')
.attr('class', 'datamaps-arc')
.style('stroke-linecap', 'round')
.style('stroke', function(datum) {
/* return val(datum.strokeColor, options.strokeColor, datum);*/
for (var cat in cat_color) {
if (datum.event.startsWith(cat)) {
return cat_color[cat];
}
}
return "Green";
})
.style('fill', 'none')
.style('stroke-width', function(datum) {
return val(datum.strokeWidth, options.strokeWidth, datum);
})
.attr('d', function(datum) {
var originXY, destXY;
originXY = self.latLngToXY(val(datum.origin.latitude, datum), val(datum.origin.longitude, datum))
destXY = self.latLngToXY(val(datum.destination.latitude, datum), val(datum.destination.longitude, datum));
var midXY = [ (originXY[0] + destXY[0]) / 2, (originXY[1] + destXY[1]) / 2];
if (options.greatArc) {
// TODO: Move this to inside `if` clause when setting attr `d`
var greatArc = d3.geo.greatArc()
.source(function(d) { return [val(d.origin.longitude, d), val(d.origin.latitude, d)]; })
.target(function(d) { return [val(d.destination.longitude, d), val(d.destination.latitude, d)]; });
return path(greatArc(datum))
}
var sharpness = val(datum.arcSharpness, options.arcSharpness, datum);
return "M" + originXY[0] + ',' + originXY[1] + "S" + (midXY[0] + (50 * sharpness)) + "," + (midXY[1] - (75 * sharpness)) + "," + destXY[0] + "," + destXY[1];
})
.attr('data-info', function(datum) {
return JSON.stringify(datum);
})
.on('mouseover', function ( datum ) {
var $this = d3.select(this);
if (options.popupOnHover) {
self.updatePopup($this, datum, options, svg);
}
})
.on('mouseout', function ( datum ) {
var $this = d3.select(this);
d3.selectAll('.datamaps-hoverover').style('display', 'none');
})
.transition()
.style('fill', function(datum, i) {
/*
Thank you Jake Archibald, this is awesome.
Source: http://jakearchibald.com/2013/animated-line-drawing-svg/
*/
var length = this.getTotalLength();
this.style.transition = this.style.WebkitTransition = 'none';
this.style.strokeDasharray = length + ' ' + length;
this.style.strokeDashoffset = length;
this.getBoundingClientRect();
this.style.transition = this.style.WebkitTransition = 'stroke-dashoffset ' + val(datum.animationSpeed, options.animationSpeed, datum) + 'ms ' + datum.delay*1000 + 'ms ease-out';
this.style.strokeDashoffset = '0';
return 'none';
});
arcs.exit()
.transition()
.duration(1000)
.style('opacity', 0)
.remove();
}
var main_data = [];
var prev_data = 0;
// Configuration of datamap canvas
// Further reading can be found at https://datamaps.github.io/
function Datamap() {
this.$container = $("#container");
var instance = this.instance = new Datamaps({
scope: 'world',
element: this.$container.get(0),
done: this._handleMapReady.bind(this),
projection: 'mercator',
fills: {
/*defaultFill: '#454545'*/
defaultFill: 'black'
},
geographyConfig: {
hideAntarctica: true,
borderColor: '#0062a2',
highlightFillColor: '#4b4d4a',
highlightBorderColor: '#fdfdfd',
popupOnHover: true,
popupTemplate: function(geography, data) {
return '<div class="hoverinfo" id="country">' + geography.properties.name + '</div>';
},
},
ph_arcConfig: {
strokeColor: '#0062a2',
strokeWidth: 2,
arcSharpness: 2, /* 5 */
animationSpeed: 3000, // Milliseconds
popupOnHover: true,
// Case with latitude and longitude
popupTemplate: function(geography, data) {
if ( ( data.origin && data.destination ) && data.origin.latitude && data.origin.longitude && data.destination.latitude && data.destination.longitude ) {
// Content of info table
str = '<div class="hoverinfo"><table id="event"><tr><th>Warden Event</th></tr><tr><td>Type</td><td>'+ JSON.stringify(data.event) +'</td></tr><tr><td>Detect Time</td><td>'+ JSON.stringify(data.time) +'</td></tr><tr><th>Event origin</th></tr><tr><td>IP</td><td>' + JSON.stringify(data.origin.ip) + '</td></tr><tr><td>City & Country</td><td>' + JSON.stringify(data.origin.city) + ',&nbsp;' + JSON.stringify(data.origin.country_name) + '</td></tr><tr><td>GPS</td><td>' + JSON.stringify(data.origin.latitude) + ',&nbsp;' + JSON.stringify(data.origin.longitude) + '</td></tr><tr><th>Event Destination</th></tr><tr><td>IP</td><td>' + JSON.stringify(data.destination.ip) + '</td></tr><tr><td>City & Country</td><td>' + JSON.stringify(data.destination.city) + ',&nbsp;' + JSON.stringify(data.destination.country_name) + '</td></tr><tr><td>GPS</td><td>' + JSON.stringify(data.destination.latitude) + ',&nbsp;' + JSON.stringify(data.destination.longitude) + '</td></tr></table></div>';
return str.replace(/&quot;/g,"");
}
// Missing information
else {
return '';
}
}
}
});
var legend_data = d3.select("#legend")
.selectAll("li")
.data(Object.keys(cat_color).sort())
.enter()
.append("li")
.append("span")
.style("color", function(datum) { return cat_color[datum]})
.text(function(datum) { return datum; })
.append("span")
.text(function(datum) { return "" + cat_desc[datum]})
.style("color", "white");
instance.addPlugin('ph_arc', handleArcs);
setInterval(function(){
d3.json("./warden-map.json", function(error, data) {
if (data) {
var cur_data = data.pop()
var cur_time = new Date().getTime();
if (cur_data != prev_data) {
prev_data = cur_data;
for (var i=0; i<data.length; i++) {
data[i].arrivalTime = cur_time;
data[i].delay = i/data.length;
}
main_data = main_data.concat(data);
}
}
var trimmed_data = [];
for (var i=0; i<main_data.length; i++) {
if (main_data[i].arrivalTime + 3500 > cur_time) {
trimmed_data.push(main_data[i]);
}
}
main_data = trimmed_data;
trimmed_data = cur_time = cur_data = error = data = null;
instance.ph_arc(main_data);
});
}, 1000);
};
Datamap.prototype._handleMapReady = function(datamap) {
this.zoom = new Zoom({
$container: this.$container,
datamap: datamap
});
}
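The map polls ./warden-map.json once per second: every element carries origin/destination coordinates plus the event, time, ip, city and country_name fields used by handleArcs() and the popup template, and the last array element is a change marker that the frontend pops and compares with prev_data. The backend that writes this file is not part of this changeset; the sketch below only illustrates that contract, and every name and sample value in it is hypothetical.

# Illustration only, not part of the repository: a hypothetical writer for
# ./warden-map.json, with field names taken from handleArcs() and the popup
# template in warden-map.js above.
import json
import time

def write_feed(arcs, path="warden-map.json"):
    # arcs: list of dicts such as
    # {"event": "Recon.Scanning", "time": "2016-06-01T12:00:00Z",
    #  "origin": {"ip": "192.0.2.1", "city": "Pilsen", "country_name": "Czech Republic",
    #             "latitude": 49.74, "longitude": 13.37},
    #  "destination": {"ip": "198.51.100.2", "city": "Prague", "country_name": "Czech Republic",
    #                  "latitude": 50.08, "longitude": 14.43}}
    feed = list(arcs)
    feed.append(int(time.time()))  # trailing change marker consumed by data.pop()
    with open(path, "w") as f:
        json.dump(feed, f)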
<!-- -->
<!-- -->
<!-- -*- coding: utf-8 -*- -->
<!-- -->
<!-- warden-map.html -->
<!-- -->
<!-- Copyright (C) 2016 Cesnet z.s.p.o -->
<!-- Use of this source is governed by a 3-clause BSD-style license, see LICENSE file. -->
<!-- -->
<!-- -->
<!DOCTYPE html>
<meta name="robots" content="noindex">
<meta charset="utf-8">
<link href='https://fonts.googleapis.com/css?family=Oswald&amp;subset=latin,latin-ext' rel='stylesheet' type='text/css'>
<link rel="stylesheet" type="text/css" href="./css/warden-map.css"/>
<body>
<script src="https://d3js.org/d3.v3.min.js"></script>
<script src="https://d3js.org/topojson.v1.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js"></script>
<script src="./js/datamaps.world.min.js"></script>
<script src="./js/warden-map.js"></script>
<!--
<h2>Warden Map</h2>
<div id="tools">
<button class="zoom-button" data-zoom="reset">&#x2302</button>
<button class="zoom-button" data-zoom="out">-</button>
<button class="zoom-button" data-zoom="in">+</button>
<div id="zoom-info"></div>
</div>
-->
<div id="container"></div>
<div id="heading">Attacks detected in the CESNET network<br/>
SABU - Sharing and Analysis of Security Events
</div>
<div id="legend-box">
<p><b>Reported to Warden right <i>now</i>.</b></p>
<ul id="legend"></ul>
</div>
<!-- Draw datamap into id="container" -->
<script>new Datamap();</script>
</body>
</html>
{
"url": "https://midas.civ.zcu.cz:8888/warden3",
"certfile": "kostik.zcu.cz-cert.pem",
"keyfile": "kostik.zcu.cz-key.pem",
"cafile": "Warden_CA-cacert.pem",
"timeout": 60,
"recv_events_limit": 6000,
"errlog": {"level": "debug"},
"filelog": {"file": "warden_client.log", "level": "warning"},
#"syslog": {"socket": "/dev/log", "facility": "local7", "level": "warning"},
"idstore": "warden_client.id",
"name": "warden_client_kostik",
"secret": "Phaipe5ush7p"
}
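This is the configuration of the receiving client; the #-commented "syslog" line assumes a config reader that strips comments before JSON parsing rather than strict json.load. A minimal usage sketch follows, assuming the Warden 3 client API (read_cfg() and Client.getEvents()) and a configuration file name, neither of which is shown in this diff.

# Illustration only, not part of the repository: assumes the warden_client
# module provides read_cfg() and a Client class with getEvents(), as in the
# Warden 3 client examples; the config file name is an assumption.
from warden_client import Client, read_cfg

wclient = Client(**read_cfg("warden_client.cfg"))  # the JSON shown above
events = wclient.getEvents(count=10)               # fetch up to 10 IDEA events
for event in events:
    print event.get("ID"), event.get("Category")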
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "= IDEA0 format definition =\n\nKeys use !CamelCase, however to avoid confusion, they must be case insensitively unique within their parent object. When parsing, keys \"ID\", \"id\", \"iD\" and \"Id\" must be considered as equivalent.\n\nEach definition line is in form KEY: TYPE, followed by an explanation line, where type can be basic JSON type (in ''italics''), syntactically restricted type (with reference to [[#Types|Types]] chapter), or array of former two (order is important). Types define expected syntax, however their content may be further syntactically or semantically restricted according to particular key explanation.\n\nThe keys ''Format'', ''ID'', ''!DetectTime'' and ''Category'' are mandatory, rest of the keys is optional (nonexistent key indicates that information is not applicable or unknown).\n\nAs human language may be ambiguous inadvertently or by omission, when in doubt, consult [[IDEA/Schema|JSON schema]].",
"type": "object",
"required": ["Format", "ID", "DetectTime", "Category"],
"definitions": {
"Boolean": {
"description": "JSON \"true\" or \"false\" value.",
"type": "boolean"
},
"Integer": {
"description": "JSON \"number\" with no fractional and exponential part.",
"type": "integer"
},
"Version": {
"description": "Must contain string \"IDEA0\". (Trailing zero denotes draft version, after review/discussion and specification finalisation the name will change.)",
"type": "string",
"enum": ["IDEA0"]
},
"MediaType": {
"description": "Internet media type without parameters. Format is type and subtype, separated by slash, where type can contain only alphanumeric, underscore and minus sign, and subtype can contain only alphanumeric, plus and minus sign, underscore and dot.",
"type": "string",
"pattern": "^[a-zA-Z0-9_-]+/[a-zA-Z0-9_+.-]+$"
},
"Charset": {
"description": "Character set name may consist of alphanumeric, dot, colon, minus sign, underscore and parentheses (round brackets).",
"type": "string",
"pattern": "^[a-zA-Z0-9.:_()-]+$"
},
"Encoding": {
"description": "May contain only string \"base64\" (however note that key can be nonexistent, which means native encoding).",
"type": "string",
"enum": ["base64"]
},
"Handle": {
"description": "String value unique among all \"Handle\" element values. May contain only alphanumeric or underscore, must not start with number and must not be empty.",
"type": "string",
"pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$"
},
"ID": {
"description": "String, containing reasonably globally unique identifier. UUID version 4 (random) or 5 (SHA-1) is recommended. As IDs are meant to be used at other mediums, transfer protocols and formats (an example being query string fields in URL), they are allowed to contain only reasonably safe subset of characters. May thus contain only alphanumeric, dot, minus sign and underscore and must not be empty.",
"type": "string",
"pattern": "^[a-zA-Z0-9._-]+$"
},
"Timestamp": {
"description": "String, containing timestamp conforming to [[http://tools.ietf.org/html/rfc3339|RFC 3339]].",
"type": "string",
"format": "date-time",
"pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}[Tt ][0-9]{2}:[0-9]{2}:[0-9]{2}(?:\\.[0-9]+)?(?:[Zz]|(?:[+-][0-9]{2}:[0-9]{2}))?$"
},
"Duration": {
"description": "String, containing time offset, intended for representing difference between two timestamps. Format is time part of [[http://tools.ietf.org/html/rfc3339|RFC 3339]], optionally prepended by \"D\" or \"d\" separator and number of days (which can have arbitrary number of digits). \"D\" separator has been chosen to distinguish from internet time, and as a memory aid for \"duration\" or \"days\". For example \"536D10:20:30.5\" means 536 days, 10 hours, 20 minutes, 30.5 seconds, whereas 00:05:00 represents five minutes.\n\n[[http://tools.ietf.org/html/rfc2234|ABNF]] syntax:\n{{{\ntime-hour = 2DIGIT ; 00-23\ntime-minute = 2DIGIT ; 00-59\ntime-second = 2DIGIT ; 00-59\ntime-secfrac = \".\" 1*DIGIT\nseparator = \"D\" / \"d\"\ndays = 1*DIGIT\n\nduration = [days separator] time-hour \":\" time-minute \":\" time-second [time-secfrac]\n}}}",
"type": "string",
"format": "date-time",
"pattern": "^(?:[0-9]+[Dd])?[0-9]{2}:[0-9]{2}:[0-9]{2}(?:\\.[0-9]+)?$"
},
"URI": {
"description": "String, containing URI as defined in [[http://tools.ietf.org/html/rfc3986|RFC 3986]] and related.",
"type": "string",
"format": "uri",
"pattern": "^[a-zA-Z][a-zA-Z0-9+.-]*:[][a-zA-Z0-9._~:/?#@*'&'()*+,;=%-]*$"
},
"Net4": {
"description": "String, containing IPv4 range in human readable form. Range can be specified as CIDR network (\"192.0.2.0/24\") or two IP addresses in dot-decimal notation, separated by minus sign (\"192.0.2.0-192.0.2.255\").",
"type": "string",
"pattern": "^(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(?:(?:/(?:[0-9]|[1-2][0-9]|3[0-2]))|(?:-(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])))?$"
},
"Net6": {
"description": "String, containing IPv6 range in human readable form. Range can be specified as CIDR notation (\"2001:db8::/48\") or two IP addresses in colon-hexadecimal notation, separated by minus sign (\"2001:db8::-2001:db8:0:ffff:ffff:ffff:ffff:ffff\").",
"type": "string",
"pattern": "^(?:(?:(?:[0-9A-Fa-f]{1,4}:){7}(?:[0-9A-Fa-f]{1,4}|:))|(?:(?:[0-9A-Fa-f]{1,4}:){6}(?::[0-9A-Fa-f]{1,4}|(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3})|:))|(?:(?:[0-9A-Fa-f]{1,4}:){5}(?:(?:(?::[0-9A-Fa-f]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3})|:))|(?:(?:[0-9A-Fa-f]{1,4}:){4}(?:(?:(?::[0-9A-Fa-f]{1,4}){1,3})|(?:(?::[0-9A-Fa-f]{1,4})?:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9A-Fa-f]{1,4}:){3}(?:(?:(?::[0-9A-Fa-f]{1,4}){1,4})|(?:(?::[0-9A-Fa-f]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9A-Fa-f]{1,4}:){2}(?:(?:(?::[0-9A-Fa-f]{1,4}){1,5})|(?:(?::[0-9A-Fa-f]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9A-Fa-f]{1,4}:){1}(?:(?:(?::[0-9A-Fa-f]{1,4}){1,6})|(?:(?::[0-9A-Fa-f]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?::(?:(?:(?::[0-9A-Fa-f]{1,4}){1,7})|(?:(?::[0-9A-Fa-f]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:)))(?:(?:/(?:\\d|\\d\\d|1[0-1]\\d|12[0-8]))?|-(?:(?:(?:[0-9A-Fa-f]{1,4}:){7}(?:[0-9A-Fa-f]{1,4}|:))|(?:(?:[0-9A-Fa-f]{1,4}:){6}(?::[0-9A-Fa-f]{1,4}|(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3})|:))|(?:(?:[0-9A-Fa-f]{1,4}:){5}(?:(?:(?::[0-9A-Fa-f]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3})|:))|(?:(?:[0-9A-Fa-f]{1,4}:){4}(?:(?:(?::[0-9A-Fa-f]{1,4}){1,3})|(?:(?::[0-9A-Fa-f]{1,4})?:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9A-Fa-f]{1,4}:){3}(?:(?:(?::[0-9A-Fa-f]{1,4}){1,4})|(?:(?::[0-9A-Fa-f]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9A-Fa-f]{1,4}:){2}(?:(?:(?::[0-9A-Fa-f]{1,4}){1,5})|(?:(?::[0-9A-Fa-f]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?:(?:[0-9A-Fa-f]{1,4}:){1}(?:(?:(?::[0-9A-Fa-f]{1,4}){1,6})|(?:(?::[0-9A-Fa-f]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))|(?::(?:(?:(?::[0-9A-Fa-f]{1,4}){1,7})|(?:(?::[0-9A-Fa-f]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)){3}))|:))))$"
},
"FQDN": {
"description": "String, containing fully qualified domain name. See [[https://tools.ietf.org/html/rfc1034#section-3.1|RFC 1034, chapter 3.1]], [[https://tools.ietf.org/html/rfc1035#section-2.3.1|RFC 1035, chapter 2.3.1]], [[https://tools.ietf.org/html/rfc1123#section-2|RFC 1123, section 2]] and related.",
"type": "string",
"format": "hostname",
"allOf": [
{
"description": "FQDN label may start and/or end with letter or number, contain letters, numbers or hyphen. Labels must be separated by one dot.",
"pattern": "^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)*(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?)?$"
},
{
"description": "Domain name labels may contain at most 63 characters.",
"pattern": "^(?:[^.]{1,63}\\.)*(?:[^.]{0,63})?$"
},
{
"description": "There can be at most 127 levels of labels.",
"pattern": "^(?:[^.]*\\.[^.]*){1,126}|[^.]*$"
},
{
"description": "Maximum length of domain name is 253 characters.",
"maxLength": 253
}
]
},
"DN": {
"description": "String, containing (possibly relative, not fully qualified) domain name. See [[https://tools.ietf.org/html/rfc1034#section-3.1|RFC 1034, chapter 3.1]], [[https://tools.ietf.org/html/rfc1035#section-2.3.1|RFC 1035, chapter 2.3.1]], [[https://tools.ietf.org/html/rfc1123#section-2|RFC 1123, section 2]] and related.",
"type": "string",
"format": "hostname",
"allOf": [
{
"description": "DN label may start and/or end with letter or number, contain letters, numbers or hyphen. Labels must be separated by one dot.",
"pattern": "^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)*(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?)?$"
},
{
"description": "Domain name labels may contain at most 63 characters.",
"pattern": "^(?:[^.]{1,63}\\.)*(?:[^.]{0,63})?$"
},
{
"description": "There can be at most 127 levels of labels.",
"pattern": "^(?:[^.]*\\.[^.]*){1,126}|[^.]*$"
},
{
"description": "Maximum length of domain name is 253 characters.",
"maxLength": 253
}
]
},
"MAC": {
"description": "String, containing MAC address in human friendly form - six groups of two hexadecimal digits, separated by colon.",
"type": "string",
"pattern": "^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$"
},
"Netname": {
"description": "URI string, containing LIR identifier and network identifier within LIR namespace, separated by colon.",
"type": "string",
"format": "uri",
"pattern": "^[a-zA-Z][a-zA-Z0-9+.-]*:[][a-zA-Z0-9._~:/?#@*'&'()*+,;=%-]*$"
},
"Hash": {
"description": "URI string, defining hash type and hash value, separated by colon.",
"type": "string",
"format": "uri",
"pattern": "^[a-zA-Z][a-zA-Z0-9+.-]*:[][a-zA-Z0-9._~:/?#@*'&'()*+,;=%-]*$"
},
"EventTag": {
"description": "Category name consists of one or two abbreviated parts - category and optional subcategory, separated by dot. If unsure of more precise nature of the incident, subcategory and dot may be omitted. Category and subcategory name must contain only alphanumeric, underscore and minus sign.\n\nFor semantics and taxonomy see [[IDEA/Classifications#EventTag|security event types classification]].",
"type": "string",
"pattern": "^[a-zA-Z0-9_-]+(?:\\.[a-zA-Z0-9_-]+)?$"
},
"ProtocolName": {
"description": "Name must not be empty, must contain only alphanumeric and minus sign, must contain at least one letter, must not begin or end with a hyphen and two hyphens must not be adjacent.\n\nFor semantics and applicable strings see [[IDEA/Classifications#ProtocolName|protocols classification]].",
"type": "string",
"allOf": [
{
"description": "Protocol name must contain at least one letter.",
"pattern": "[a-zA-Z]"
},
{
"description": "Protocol name must contain only alphanumeric and minus sign.",
"pattern": "^[a-zA-Z0-9-]*$"
},
{
"description": "Protocol name must begin with alphanumeric.",
"pattern": "^[a-zA-Z0-9]"
},
{
"description": "Protocol name must end with alphanumeric.",
"pattern": "[a-zA-Z0-9]$"
},
{
"description": "There must not be two adjacent hyphens in protocol name.",
"not": {
"pattern": "--"
}
}
]
},
"SourceTargetTag": {
"description": "Tag name must contain only alphanumeric, underscore and minus sign.\n\nFor semantics and taxonomy see [[IDEA/Classifications#SourceTargetTag|source/target classification]].",
"type": "string",
"pattern": "^[a-zA-Z0-9_-]+$"
},
"NodeTag": {
"description": "Tag name must contain only alphanumeric, underscore and minus sign.\n\nFor semantics and taxonomy see [[IDEA/Classifications#NodeTag|classification of detection nodes]].",
"type": "string",
"pattern": "^[a-zA-Z0-9_-]+$"
},
"AttachmentTag": {
"description": "Tag name must contain only alphanumeric, underscore and minus sign.\n\nFor semantics and taxonomy see [[IDEA/Classifications#AttachmentTag|attachment description]].",
"type": "string",
"pattern": "^[a-zA-Z0-9_-]+$"
}
},
"properties": {
"Format": {
"description": "Identifier of the IDEA container.",
"$ref": "#/definitions/Version"
},
"ID": {
"description": "Unique message identifier.",
"$ref": "#/definitions/ID"
},
"AltNames": {
"description": "Array of alternative identifiers.",
"type": "array",
"items": {
"description": "Alternative identifiers; strings which help to pair the event to internal system information (for example tickets in request tracker systems).",
"type": "string"
}
},
"CorrelID": {
"description": "Array of correlated messages identifiers.",
"type": "array",
"items": {
"description": "Identifiers of messages, which are information sources for creation of this message in case the message has been created based on correlation/analysis/deduction of other messages.",
"$ref": "#/definitions/ID"
}
},
"AggrID": {
"description": "Array of aggregated messages identifiers.",
"type": "array",
"items": {
"description": "Identifiers of messages, which are aggregated into more concise form by this message. Should be sent mostly by intermediary nodes, which detect duplicates, or aggregate events, spanning multiple detection windows, into one longer.",
"$ref": "#/definitions/ID"
}
},
"PredID": {
"description": "Array of obsoleted messages identifiers.",
"type": "array",
"items": {
"description": "Identifiers of messages, which are obsoleted and information in them is replaced by this message. Should be sent only by detection nodes to incorporate further data about ongoing event.",
"$ref": "#/definitions/ID"
}
},
"RelID": {
"description": "Array of related messages identifiers.",
"type": "array",
"items": {
"description": "Otherwise related messages.",
"$ref": "#/definitions/ID"
}
},
"CreateTime": {
"description": "Timestamp of the creation of the IDEA message. May point out delay between detection and processing of data.",
"$ref": "#/definitions/Timestamp"
},
"DetectTime": {
"description": "Timestamp of the moment of detection of event (not necessarily time of the event taking place). This timestamp is mandatory, because every detector is able to know when it detected the information - for example when line about event appeared in the logfile, or when its information source says the event was detected, or at least when it accepted the information from the source.",
"$ref": "#/definitions/Timestamp"
},
"EventTime": {
"description": "Deduced start of the event/attack, or just time of the event if it is solitary.",
"$ref": "#/definitions/Timestamp"
},
"CeaseTime": {
"description": "Deduced end of the event/attack.",
"$ref": "#/definitions/Timestamp"
},
"WinStartTime": {
"description": "Beginning of aggregation window in which event has been observed.",
"$ref": "#/definitions/Timestamp"
},
"WinEndTime": {
"description": "End of aggregation window in which event has been observed.",
"$ref": "#/definitions/Timestamp"
},
"ConnCount": {
"description": "Number of individual connections attempted or taken place.",
"$ref": "#/definitions/Integer"
},
"FlowCount": {
"description": "Number of individual simplex (one direction) flows.",
"$ref": "#/definitions/Integer"
},
"PacketCount": {
"description": "Number of individual packets transferred.",
"$ref": "#/definitions/Integer"
},
"ByteCount": {
"description": "Number of bytes transferred.",
"$ref": "#/definitions/Integer"
},
"Category": {
"description": "Array of event categories.",
"type": "array",
"items": {
"description": "Category of event.",
"$ref": "#/definitions/EventTag"
}
},
"Ref": {
"description": "Array of references.",
"type": "array",
"items": {
"description": "References to known sources, related to attack and/or vulnerability. May be URL of the additional info, or URN (according to [[http://tools.ietf.org/html/rfc2141|RFC 2141]]) in registered namespace ([[http://www.iana.org/assignments/urn-namespaces/urn-namespaces.xhtml|IANA]]) or unregistered ad-hoc namespace bearing reasonable information value and uniqueness, such as \"urn:cve:CVE-2013-5634\".",
"$ref": "#/definitions/URI"
}
},
"Confidence": {
"description": "Confidence of detector in its own reliability of this particular detection. (0 – surely false, 1 – no doubts). If the key is not present, the detector does not know (or has no capability to estimate the confidence).",
"type": "number",
"maximum": 1,
"minimum": 0
},
"Description": {
"description": "Short free text human readable description.",
"type": "string"
},
"Note": {
"description": "Free text human readable additional note, possibly longer description of incident if not obvious.",
"type": "string"
},
"Source": {
"type": "array",
"description": "Array of source or target descriptions.",
"items": {
"description": "Information concerning particular source or target.",
"type": "object",
"properties": {
"Type": {
"description": "Array of source/target categories.",
"type": "array",
"items": {
"description": "Closer category of source/target.",
"$ref": "#/definitions/SourceTargetTag"
}
},
"Hostname": {
"description": "Array of hostnames.",
"type": "array",
"items": {
"description": "Hostname of this source/target. Should be FQDN, but may not conform exactly, because values, extracted from logs, messages, DNS, etc. may themselves be malformed. Empty array can be used to explicitly indicate that value has been inquired and not found (missing DNS name).",
"type": "string"
}
},
"IP4": {
"description": "Array of IPv4 addresses.",
"type": "array",
"items": {
"description": "IPv4 addresses of this source/target.",
"$ref": "#/definitions/Net4"
}
},
"MAC": {
"description": "Array of MAC addresses.",
"type": "array",
"items": {
"description": "MAC addresses of this source/target.",
"$ref": "#/definitions/MAC"
}
},
"IP6": {
"description": "Array of IPv6 addresses.",
"type": "array",
"items": {
"description": "IPv6 addresses of this source/target.",
"$ref": "#/definitions/Net6"
}
},
"Port": {
"description": "Array of port numbers.",
"type": "array",
"items": {
"description": "Source or destination ports affected.",
"$ref": "#/definitions/Integer"
}
},
"Proto": {
"description": "Array of protocol names.",
"type": "array",
"items": {
"description": "Protocols, concerning connections from/to this source/target.",
"$ref": "#/definitions/ProtocolName"
}
},
"URL": {
"description": "Array of URLs.",
"type": "array",
"items": {
"description": "Uniform Resource Locator of this source/target. Should be formatted according to [[http://tools.ietf.org/html/rfc1738|RFC 1738]], [[http://tools.ietf.org/html/rfc1808|RFC 1808]] and related, however may not conform exactly, because values, extracted from logs, messages, etc. may themselves be malformed.",
"type": "string"
}
},
"Email": {
"description": "Array of email addresses.",
"type": "array",
"items": {
"description": "Email address (for example Reply-To address in phishing message). Should be formatted according to [[http://tools.ietf.org/html/rfc5322#section-3.4|RFC 5322, section 3.4]] and related, however may not conform exactly, because values, extracted from logs, messages, DNS, etc. may themselves be malformed.",
"type": "string"
}
},
"AttachHand": {
"description": "Array of attachment identifiers.",
"type" : "array",
"items": {
"description": "Identifiers of attachments related to this source/target - contain \"Handle\"s of related attachments.",
"$ref": "#/definitions/Handle"
}
},
"Note": {
"description": "Free text human readable additional note.",
"type": "string"
},
"Spoofed": {
"description": "Establishes whether this source/target is forged.",
"$ref": "#/definitions/Boolean"
},
"Imprecise": {
"description": "Establishes whether this source/target is knowingly imprecise.",
"$ref": "#/definitions/Boolean"
},
"Anonymised": {
"description": "Establishes whether this source/target is willingly incomplete.",
"$ref": "#/definitions/Boolean"
},
"ASN": {
"description": "Autonomous system numbers.",
"type": "array",
"items": {
"description": "Autonomous system number of this source/target.",
"$ref": "#/definitions/Integer"
}
},
"Router": {
"description": "Array of router/interface paths.",
"type": "array",
"items": {
"description": "Router/interface path information. Intentionally organisation specific, router identifiers have usually no clear meaning outside organisational unit.",
"type": "string"
}
},
"Netname": {
"description": "Array of RIR network identifiers.",
"type": "array",
"items": {
"description": "RIR database reference network identifier (for example \"ripe:CESNET-BB2\" or \"arin:WETEMAA\"). Common network identifiers are: ripe, arin, apnic, lacnic, afrinic. Empty array can be used to explicitly indicate that value has been inquired and not found (IP address from unassigned block).",
"$ref": "#/definitions/Netname"
}
},
"Ref": {
"description": "Array of references.",
"type": "array",
"items": {
"description": "References to known sources, related to attack and/or vulnerability, specific to this source/target. May be URL of the additional info, or URN (according to [[http://tools.ietf.org/html/rfc2141|RFC 2141]]) in registered namespace ([[http://www.iana.org/assignments/urn-namespaces/urn-namespaces.xhtml|IANA]]) or unregistered ad-hoc namespace bearing reasonable information value and uniqueness, such as \"urn:cve:CVE-2013-2266\".",
"$ref": "#/definitions/URI"
}
}
}
}
},
"Target": {
"$ref": "#/properties/Source"
},
"Attach": {
"description": "Array of attachment descriptions.",
"type": "array",
"items": {
"type": "object",
"description": "Additional attachment information and data.",
"properties": {
"Handle": {
"description": "Message unique identifier for reference through Attach elements.",
"$ref": "#/definitions/Handle"
},
"FileName": {
"description": "Array of filenames.",
"type": "array",
"items": {
"description": "Names of the attached file.",
"type": "string"
}
},
"Type": {
"description": "Array of attachment type tags.",
"type": "array",
"items": {
"description": "Type of the attached data.",
"$ref": "#/definitions/AttachmentTag"
}
},
"Hash": {
"description": "Array of checksums.",
"type": "array",
"items": {
"description": "Checksum of the content (for example \"sha1:794467071687f7c59d033f4de5ece6b46415b633\" or \"md5:dc89f0b4ff9bd3b061dd66bb66c991b1\").",
"$ref": "#/definitions/Hash"
}
},
"Size": {
"description": "Length of the content.",
"$ref": "#/definitions/Integer"
},
"Ref": {
"description": "Array of references.",
"type": "array",
"items": {
"description": "References to known sources, related to attack and/or vulnerability, specific to this attachment. May be URL of the additional info, or URN (according to [[http://tools.ietf.org/html/rfc2141|RFC 2141]]) in registered namespace ([[http://www.iana.org/assignments/urn-namespaces/urn-namespaces.xhtml|IANA]]) or unregistered ad-hoc namespace bearing reasonable information value and uniqueness, such as \"urn:clamav:Win.Trojan.Banker-14334\".",
"$ref": "#/definitions/URI"
}
},
"Note": {
"description": "Free text human readable additional note.",
"type": "string"
},
"ContentType": {
"description": "Internet Media Type of the attachment, according to [[http://tools.ietf.org/html/rfc2046|RFC 2046]] and related. Along with [[http://www.iana.org/assignments/media-types/media-types.xhtml|types standardized by IANA]] also non standard but widely used media types can be used (for examples see [[http://www.freeformatter.com/mime-types-list.html|MIME types list at freeformatter.com]]).",
"$ref": "#/definitions/MediaType"
},
"ContentCharset": {
"description": "Name of the content character set according to [[http://www.iana.org/assignments/character-sets/character-sets.xhtml|IANA list]]. If key is not defined, unspecified binary encoding is assumed.",
"$ref": "#/definitions/Charset"
},
"ContentEncoding": {
"description": "Encoding of the content, if feasible. Nonexistent key means native JSON encoding.",
"$ref": "#/definitions/Encoding"
},
"Content": {
"description": "Attachment content.",
"type": "string"
},
"ContentID": {
"description": "Array of external content IDs.",
"type": "array",
"items": {
"description": "If content of attachment is transferred separately (in underlying container), this key contains external ID of the content, so it can be paired back to message.",
"type": "string"
}
},
"ExternalURI": {
"description": "Array of external URIs.",
"type": "array",
"items": {
"description": "If content of attachment is available and/or recognizable from external source, this is defining URI (usually URL). May also be URN (according to [[http://tools.ietf.org/html/rfc2141|RFC 2141]]) in registered namespace ([[http://www.iana.org/assignments/urn-namespaces/urn-namespaces.xhtml|IANA]]) or unregistered ad-hoc namespace bearing reasonable information value and uniqueness, such as \"urn:mhr:55eaf7effadc07f866d1eaed9c64e7ee49fe081a\", \"magnet:?xt=urn:sha1:YNCKHTQCWBTRNJIV4WNAE52SJUQCZO5C\".",
"$ref": "#/definitions/URI"
}
}
}
}
},
"Node": {
"description": "Array of detector descriptions.",
"type": "array",
"items": {
"description": "Detector or possible intermediary (event aggregator, correlator, etc.) description.",
"type": "object",
"properties": {
"Name": {
"description": "Name of the detector, chosen by (and local to) organisational unit, within which it should be unique.",
"$ref": "#/definitions/DN"
},
"Realm": {
"description": "Administrative domain string. Usually denotes organisation (or smaller organisational unit) which detector belongs to. The tuple (Name, Realm) thus should be reasonably unique, however still bear some readily meaningful sense.",
"$ref": "#/definitions/FQDN"
},
"Type": {
"description": "Array of detection node types.",
"type": "array",
"items": {
"description": "Tag, describing various facets of the detector.",
"$ref": "#/definitions/NodeTag"
}
},
"SW": {
"description": "Array of detection software names.",
"type": "array",
"items": {
"description": "The name of the detection software (optionally including version). For example \"labrea-2.5-stable-1\" or \"HP !TippingPoint 7500NX\".",
"type": "string"
}
},
"AggrWin": {
"description": "The size of the aggregation window, if applicable.",
"$ref": "#/definitions/Duration"
},
"Note": {
"description": "Free text human readable additional description.",
"type": "string"
}
}
}
}
}
}
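The schema requires Format (fixed to "IDEA0"), ID, DetectTime and Category; everything else is optional. A quick validation sketch using the same jsonschema classes the server imports (Draft4Validator, FormatChecker); the schema file name used here is an assumption.

# Illustration only, not part of the repository: validate a minimal IDEA0
# message against the schema above.
import json
from uuid import uuid4
from jsonschema import Draft4Validator, FormatChecker

with open("idea.schema") as f:              # assumed file name for the schema above
    schema = json.load(f)
validator = Draft4Validator(schema, format_checker=FormatChecker())

event = {
    "Format": "IDEA0",                      # mandatory, fixed value
    "ID": str(uuid4()),                     # mandatory, UUID4 recommended
    "DetectTime": "2016-06-01T12:00:00Z",   # mandatory RFC 3339 timestamp
    "Category": ["Recon.Scanning"],         # mandatory array of EventTags
    "Source": [{"IP4": ["192.0.2.1"]}]      # optional
}

for error in validator.iter_errors(event):
    print error.message                     # prints nothing if the message is valid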
-- phpMyAdmin SQL Dump
-- version 3.4.11.1deb2+deb7u1
-- http://www.phpmyadmin.net
--
-- Host: localhost
-- Generation Time: Dec 04, 2014 at 02:54 PM
-- Server version: 5.5.38
-- PHP Version: 5.4.4-14+deb7u14
SET SQL_MODE="NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
--
-- Database: `warden3`
--
-- --------------------------------------------------------
--
-- Table structure for table `categories`
--
CREATE TABLE IF NOT EXISTS `categories` (
`id` int(11) NOT NULL,
`category` varchar(64) NOT NULL,
`subcategory` varchar(64) DEFAULT NULL,
`cat_subcat` varchar(129) NOT NULL,
KEY `cat_sub` (`cat_subcat`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci;
--
-- Dumping data for table `categories`
--
INSERT INTO `categories` (`id`, `category`, `subcategory`, `cat_subcat`) VALUES
(100, 'Abusive', NULL, 'Abusive'),
(101, 'Abusive', 'Spam', 'Abusive.Spam'),
(102, 'Abusive', 'Harassment', 'Abusive.Harassment'),
(103, 'Abusive', 'Child', 'Abusive.Child'),
(104, 'Abusive', 'Sexual', 'Abusive.Sexual'),
(105, 'Abusive', 'Violence', 'Abusive.Violence'),
(200, 'Malware', NULL, 'Malware'),
(201, 'Malware', 'Virus', 'Malware.Virus'),
(202, 'Malware', 'Worm', 'Malware.Worm'),
(203, 'Malware', 'Trojan', 'Malware.Trojan'),
(204, 'Malware', 'Spyware', 'Malware.Spyware'),
(205, 'Malware', 'Dialer', 'Malware.Dialer'),
(206, 'Malware', 'Rootkit', 'Malware.Rootkit'),
(300, 'Recon', NULL, 'Recon'),
(301, 'Recon', 'Scanning', 'Recon.Scanning'),
(302, 'Recon', 'Sniffing', 'Recon.Sniffing'),
(303, 'Recon', 'SocialEngineering', 'Recon.SocialEngineering'),
(304, 'Recon', 'Searching', 'Recon.Searching'),
(400, 'Attempt', NULL, 'Attempt'),
(401, 'Attempt', 'Exploit', 'Attempt.Exploit'),
(402, 'Attempt', 'Login', 'Attempt.Login'),
(403, 'Attempt', 'NewSignature', 'Attempt.NewSignature'),
(500, 'Intrusion', NULL, 'Intrusion'),
(501, 'Intrusion', 'AdminCompromise', 'Intrusion.AdminCompromise'),
(502, 'Intrusion', 'UserCompromise', 'Intrusion.UserCompromise'),
(503, 'Intrusion', 'AppCompromise', 'Intrusion.AppCompromise'),
(504, 'Intrusion', 'Botnet', 'Intrusion.Botnet'),
(600, 'Availability', NULL, 'Availability'),
(601, 'Availability', 'DoS', 'Availability.DoS'),
(602, 'Availability', 'DDoS', 'Availability.DDoS'),
(603, 'Availability', 'Sabotage', 'Availability.Sabotage'),
(604, 'Availability', 'Outage', 'Availability.Outage'),
(700, 'Information', NULL, 'Information'),
(701, 'Information', 'UnauthorizedAccess', 'Information.UnauthorizedAccess'),
(702, 'Information', 'UnauthorizedModification', 'Information.UnauthorizedModification'),
(800, 'Fraud', NULL, 'Fraud'),
(801, 'Fraud', 'UnauthorizedUsage', 'Fraud.UnauthorizedUsage'),
(802, 'Fraud', 'Copyright', 'Fraud.Copyright'),
(803, 'Fraud', 'Masquerade', 'Fraud.Masquerade'),
(804, 'Fraud', 'Phishing', 'Fraud.Phishing'),
(805, 'Fraud', 'Scam', 'Fraud.Scam'),
(900, 'Vulnerable', NULL, 'Vulnerable'),
(901, 'Vulnerable', 'Open', 'Vulnerable.Open'),
(1000, 'Anomaly', NULL, 'Anomaly'),
(1001, 'Anomaly', 'Traffic', 'Anomaly.Traffic'),
(1002, 'Anomaly', 'Connection', 'Anomaly.Connection'),
(1003, 'Anomaly', 'Protocol', 'Anomaly.Protocol'),
(1004, 'Anomaly', 'System', 'Anomaly.System'),
(1005, 'Anomaly', 'Application', 'Anomaly.Application'),
(1006, 'Anomaly', 'Behaviour', 'Anomaly.Behaviour'),
(9998, 'Other', '', 'Other'),
(9999, 'Test', '', 'Test');
-- --------------------------------------------------------
--
-- Table structure for table `clients`
--
CREATE TABLE IF NOT EXISTS `clients` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`registered` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
`requestor` varchar(256) NOT NULL,
`hostname` varchar(256) NOT NULL,
`service` varchar(256) NOT NULL,
`note` text NOT NULL,
`valid` tinyint(1) NOT NULL DEFAULT '1',
`identity` varchar(64) NOT NULL,
`secret` varchar(16) NULL,
`read` tinyint(1) NOT NULL DEFAULT '1',
`debug` tinyint(1) NOT NULL DEFAULT '0',
`write` tinyint(1) NOT NULL DEFAULT '0',
`test` int(11) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci AUTO_INCREMENT=31 ;
--
-- Dumping data for table `clients`
--
INSERT INTO `clients` (`id`, `registered`, `requestor`, `hostname`, `service`, `note`, `valid`, `identity`, `secret`, `read`, `debug`, `write`, `test`) VALUES
(1, '0000-00-00 00:00:00', '', 'afrodita.civ.zcu.cz', 'hihat', '', 1, 'cz.zcu.civ.afrodita.hihat', NULL, 1, 0, 1, 0),
(2, '0000-00-00 00:00:00', '', 'afrodita.civ.zcu.cz', 'labrea', '', 1, 'cz.zcu.civ.afrodita.labrea', NULL, 1, 0, 1, 0),
(3, '0000-00-00 00:00:00', '', 'au1.cesnet.cz', 'cesnet_ids', '', 1, 'cz.cesnet.au1.cesnet_ids', NULL, 1, 0, 1, 0),
(4, '0000-00-00 00:00:00', '', 'au1.cesnet.cz', 'cesnet_sserv', '', 1, 'cz.cesnet.au1.cesnet_sserv', NULL, 1, 0, 1, 0),
(5, '0000-00-00 00:00:00', '', 'au1.cesnet.cz', 'n6d-dorkbot', '', 1, 'cz.cesnet.au1.n6d-dorkbot', NULL, 1, 0, 1, 0),
(6, '0000-00-00 00:00:00', '', 'au1.cesnet.cz', 'n6e-certplsinkhole', '', 1, 'cz.cesnet.au1.n6e-certplsinkhole', NULL, 1, 0, 1, 0),
(7, '0000-00-00 00:00:00', '', 'au1.cesnet.cz', 'n6i-citadelsinkhole', '', 1, 'cz.cesnet.au1.n6i-citadelsinkhole', NULL, 1, 0, 1, 0),
(8, '0000-00-00 00:00:00', '', 'au1.cesnet.cz', 'n6i-citadelsinkholeqd', '', 1, 'cz.cesnet.au1.n6i-citadelsinkholeqd', NULL, 1, 0, 1, 0),
(9, '0000-00-00 00:00:00', '', 'au1.cesnet.cz', 'n6n-openntp', '', 1, 'cz.cesnet.au1.n6n-openntp', NULL, 1, 0, 1, 0),
(10, '0000-00-00 00:00:00', '', 'au1.cesnet.cz', 'n6o-botszeroaccess', '', 1, 'cz.cesnet.au1.n6o-botszeroaccess', NULL, 1, 0, 1, 0),
(11, '0000-00-00 00:00:00', '', 'au1.cesnet.cz', 'report_n6v-virut', '', 1, 'cz.cesnet.au1.report_n6v-virut', NULL, 1, 0, 1, 0),
(12, '0000-00-00 00:00:00', '', 'au2.cesnet.cz', 'ids-cz', '', 1, 'cz.cesnet.au2.ids-cz', NULL, 1, 0, 1, 0),
(13, '0000-00-00 00:00:00', '', 'bee.net.vutbr.cz', 'hpscan', '', 1, 'cz.vutbr.net.bee.hpscan', NULL, 1, 0, 1, 0),
(14, '0000-00-00 00:00:00', '', 'buldog.vsb.cz', 'kippo', '', 1, 'cz.vsb.buldog.kippo', NULL, 1, 0, 1, 0),
(15, '0000-00-00 00:00:00', '', 'collector-nemea.liberouter.org', 'nemea', '', 1, 'org.liberouter.collector-nemea.nemea', NULL, 1, 0, 1, 0),
(16, '0000-00-00 00:00:00', '', 'collector.liberouter.org', 'hoststats', '', 1, 'org.liberouter.collector.hoststats', NULL, 1, 0, 1, 0),
(17, '0000-00-00 00:00:00', '', 'collector.liberouter.org', 'synscandetector_1_0', '', 1, 'org.liberouter.collector.synscandetector_1_0', NULL, 1, 0, 1, 0),
(18, '0000-00-00 00:00:00', '', 'holly.cesnet.cz', 'kippohoneypot', '', 1, 'cz.cesnet.holly.kippohoneypot', NULL, 1, 0, 1, 0),
(19, '0000-00-00 00:00:00', '', 'kryten.cesnet.cz', 'dionaeahoneypot', '', 1, 'cz.cesnet.kryten.dionaeahoneypot', NULL, 1, 0, 1, 0),
(20, '0000-00-00 00:00:00', '', 'mentat.cesnet.cz', 'mentat', '', 1, 'cz.cesnet.mentat.mentat', NULL, 1, 0, 1, 0),
(21, '0000-00-00 00:00:00', '', 'miel.opf.slu.cz', 'kippo', '', 1, 'cz.slu.opf.miel.kippo', NULL, 1, 0, 1, 0),
(22, '0000-00-00 00:00:00', '', 'nfsen.ics.muni.cz', 'honeyscan', '', 1, 'cz.muni.ics.nfsen.honeyscan', NULL, 1, 0, 1, 0),
(23, '0000-00-00 00:00:00', '', 'nfsen.ics.muni.cz', 'scandetector_1_0', '', 1, 'cz.muni.ics.nfsen.scandetector_1_0', NULL, 1, 0, 1, 0),
(24, '0000-00-00 00:00:00', '', 'nfsen.ics.muni.cz', 'sshbruteforce-1_n', '', 1, 'cz.muni.ics.nfsen.sshbruteforce-1_n', NULL, 1, 0, 1, 0),
(25, '0000-00-00 00:00:00', '', 'vinovago.cesnet.cz', 'fail2ban', '', 1, 'cz.cesnet.vinovago.fail2ban', NULL, 1, 0, 1, 0),
(26, '0000-00-00 00:00:00', '', 'ward.tul.cz', 'dionaeatul', '', 1, 'cz.tul.ward.dionaeatul', NULL, 1, 0, 1, 0),
(27, '0000-00-00 00:00:00', '', 'ward.tul.cz', 'kippo', '', 1, 'cz.tul.ward.kippo', NULL, 1, 0, 1, 0),
(28, '0000-00-00 00:00:00', 'kostenec@civ.zcu.cz', 'kostik.zcu.cz', 'com.example.test-node', '', 1, 'com.example.test-node', "Phaipe5ush7p", 1, 0, 0, 0),
(29, '0000-00-00 00:00:00', 'kostenec@civ.zcu.cz', 'kostik.zcu.cz', 'com.example.test-node2', '', 1, 'com.example.test-node2', NULL, 1, 0, 0, 0),
(30, '0000-00-00 00:00:00', 'kostenec@civ.zcu.cz', 'kostik.zcu.cz', 'Test', '', 1, 'com.example.test-node3', NULL, 1, 0, 0, 0),
(31, '2014-12-11 13:51:18', 'ph@cesnet.cz', 'grey.cesnet.cz', 'Test', '', 1, 'cz.cesnet.grey.test', "co3kaero5Ruv", 1, 1, 1, 0);
-- --------------------------------------------------------
--
-- Table structure for table `events`
--
CREATE TABLE IF NOT EXISTS `events` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`received` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`client_id` int(11) NOT NULL,
`data` text NOT NULL,
`valid` tinyint(1) NOT NULL DEFAULT '1',
PRIMARY KEY (`id`),
KEY `id` (`id`,`client_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci AUTO_INCREMENT=1 ;
-- --------------------------------------------------------
--
-- Table structure for table `event_category_mapping`
--
CREATE TABLE IF NOT EXISTS `event_category_mapping` (
`event_id` int(11) NOT NULL,
`category_id` int(11) NOT NULL,
KEY `event_id_2` (`event_id`,`category_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `event_tag_mapping`
--
CREATE TABLE IF NOT EXISTS `event_tag_mapping` (
`event_id` int(11) NOT NULL,
`tag_id` int(11) NOT NULL,
KEY `event_id_2` (`event_id`,`tag_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `last_events`
--
CREATE TABLE IF NOT EXISTS `last_events` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`client_id` int(11) NOT NULL,
`event_id` int(11) NOT NULL,
`timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
KEY `client_id` (`client_id`,`event_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci AUTO_INCREMENT=1 ;
-- --------------------------------------------------------
--
-- Table structure for table `tags`
--
CREATE TABLE IF NOT EXISTS `tags` (
`id` int(11) NOT NULL,
`tag` varchar(64) NOT NULL,
KEY `id_tag_name` (`id`,`tag`),
KEY `tag_name` (`tag`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci;
--
-- Dumping data for table `tags`
--
INSERT INTO `tags` (`id`, `tag`) VALUES
(1, 'Connection'),
(2, 'Datagram'),
(3, 'Content'),
(4, 'Data'),
(5, 'File'),
(6, 'Flow'),
(7, 'Log'),
(8, 'Protocol'),
(9, 'Host'),
(10, 'Network'),
(11, 'Correlation'),
(12, 'External'),
(13, 'Reporting'),
(99, 'Other');
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
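The dump above stores each received message in events.data (assumed here to be the serialized IDEA JSON) and links it to the categories and tags lookup tables through the two mapping tables. An illustrative query sketch using the same MySQLdb modules the server imports; the credentials are placeholders.

# Illustration only, not part of the repository: list recent valid events of
# one category using the tables defined above.
import json
import MySQLdb as my
import MySQLdb.cursors as mycursors

conn = my.connect(host="localhost", user="warden", passwd="example",
                  db="warden3", cursorclass=mycursors.DictCursor)
cur = conn.cursor()
cur.execute(
    "SELECT e.id, e.received, e.data FROM events e"
    " JOIN event_category_mapping m ON m.event_id = e.id"
    " JOIN categories c ON c.id = m.category_id"
    " WHERE c.cat_subcat = %s AND e.valid = 1"
    " ORDER BY e.id DESC LIMIT 10",
    ("Recon.Scanning",))
for row in cur.fetchall():
    idea = json.loads(row["data"])          # assumes data holds the IDEA message
    print row["id"], row["received"], idea.get("Category")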
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2013 Cesnet z.s.p.o
# Use of this source is governed by a 3-clause BSD-style license, see LICENSE file.
import sys
import os
import logging
import logging.handlers
import ConfigParser
from traceback import format_tb
import M2Crypto.X509
import json
import MySQLdb as my
import MySQLdb.cursors as mycursors
from collections import namedtuple
from uuid import uuid4
from time import time, gmtime
from math import trunc
from io import BytesIO
from urlparse import parse_qs
from os import path
from random import randint
# for local version of up to date jsonschema
sys.path.append(path.join(path.dirname(__file__), "..", "lib"))
from jsonschema import Draft4Validator, FormatChecker
VERSION = "3.0-not-even-alpha"
class Error(Exception):
def __init__(self, message, error=500, exc=None,
method=None, req_id=None, detail=None):
self.message = message
self.error = int(error)
(self.exctype, self.excval, self.exctb) = exc or (None, None, None)
self.cause = self.excval # compatibility with other exceptions
self.method = method
self.req_id = req_id
self.detail = detail
def __str__(self):
out = []
out.append("Error(%s)" % (self.error))
if self.method is not None:
out.append(" in \"%s\"" % self.method)
if self.message is not None:
out.append(": %s" % self.message)
if self.excval is not None:
out.append(" - cause was %s: %s" % (type(self.excval).__name__, str(self.excval)))
return "".join(out)
def info_str(self):
return ("Detail: %s" % self.detail) if self.detail else ""
def debug_str(self):
out = []
if self.excval is not None:
out.append("Exception %s: %s\n" % (type(self.excval).__name__, str(self.excval)))
if self.exctb is not None:
out.append("Traceback:\n%s" % "".join(format_tb(self.exctb)))
return "".join(out)
def to_dict(self):
d = {}
if self.error is not None:
d["error"] = self.error
if self.method is not None:
d["method"] = self.method
if self.message is not None:
d["message"] = self.message
if self.detail is not None:
d["detail"] = self.detail
if self.req_id is not None:
d["req_id"] = self.req_id
return d
def get_clean_root_logger(level=logging.INFO):
""" Attempts to get logging module into clean slate state """
# We want to be able to set up at least stderr logger before any
# configuration is read, and then later get rid of it and set up
# whatever administrator requires.
# However, there can exist only one logger, but we want to get a clean
# slate everytime we initialize StreamLogger or FileLogger... which
# is not exactly supported by logging module.
# So, we look directly inside logger class and clean up handlers/filters
# manually.
logger = logging.getLogger() # no need to create new
logger.setLevel(level)
while logger.handlers:
logger.removeHandler(logger.handlers[0])
while logger.filters:
logger.removeFilter(logger.filters[0])
return logger
def StreamLogger(stream=sys.stderr, level=logging.INFO):
""" Fallback handler just for setup, not meant to be used from
        configuration file, because writing to stdout/stderr is forbidden
        during a wsgi request.
"""
fhand = logging.StreamHandler(stream)
fform = logging.Formatter('%(asctime)s %(filename)s[%(process)d]: (%(levelname)s) %(message)s')
fhand.setFormatter(fform)
logger = get_clean_root_logger(level)
logger.addHandler(fhand)
class LogRequestFilter(logging.Filter):
""" Filter class, instance of which is added to logger class to add
info about request automatically into every logline, no matter
how it came into existence.
"""
def __init__(self, req):
logging.Filter.__init__(self)
self.req = req
def filter(self, record):
if self.req.env:
record.req_preamble = "%08x/%s: " % (self.req.req_id or 0, self.req.path)
else:
record.req_preamble = ""
return True
def FileLogger(req, filename, level=logging.INFO):
fhand = logging.FileHandler(filename)
fform = logging.Formatter('%(asctime)s %(filename)s[%(process)d]: (%(levelname)s) %(req_preamble)s%(message)s')
fhand.setFormatter(fform)
ffilt = LogRequestFilter(req)
logger = get_clean_root_logger(level)
logger.addFilter(ffilt)
logger.addHandler(fhand)
logging.info("Initialized FileLogger(req=%s, filename=\"%s\", \"%s\")" % (type(req).__name__, filename, level))
def SysLogger(req, socket="/dev/log", facility=logging.handlers.SysLogHandler.LOG_DAEMON, level=logging.INFO):
fhand = logging.handlers.SysLogHandler(address=socket, facility=facility)
fform = logging.Formatter('%(filename)s[%(process)d]: (%(levelname)s) %(message)s')
fhand.setFormatter(fform)
ffilt = LogRequestFilter(req)
logger = get_clean_root_logger(level)
logger.addFilter(ffilt)
logger.addHandler(fhand)
logging.info("Initialized SysLogger(req=%s, socket=\"%s\", facility=\"%s\", level=\"%s\")" % (type(req).__name__, socket, facility, level))
class Client(namedtuple("ClientTuple",
["id", "registered", "requestor", "hostname", "service", "note",
"identity", "secret", "read", "debug", "write", "test"])):
def __str__(self):
return (
"%s(id=%i, registered=%s, requestor=\"%s\", hostname=\"%s\", "
"service=\"%s\", note=\"%s\", identity=\"%s\", secret=%s, "
"read=%i, debug=%i, write=%i, test=%i)") % (
type(self).__name__, self.id, self.registered,
self.requestor, self.hostname, self.service, self.note,
self.identity, "..." if self.secret is not None else "None",
self.read, self.debug, self.write, self.test)
class Object(object):
def __str__(self):
return "%s()" % type(self).__name__
class Request(Object):
""" Simple container for info about ongoing request.
One instance gets created before server startup, and all other
configured objects get it as parameter during instantiation.
Server then takes care of populating this instance on the start
of wsgi request (and resetting at the end). All other objects
then can find this actual request info in their own self.req.
However, only Server.wsgi_app, handler (WardenHandler) exposed
methods and logging related objects should use self.req directly.
All other objects should use self.req only as source of data for
error/exception handling/logging, and should take/return
necessary data as arguments/return values for clarity on
which data their main codepaths work with.
"""
def __init__(self):
Object.__init__(self)
self.reset()
def __str__(self):
return "%s()" % (type(self).__name__, str(self.env), str(self.client))
def reset(self, env=None, client=None, path=None, req_id=None):
self.env = env
self.client = client
self.path = path or ""
if req_id is not None:
self.req_id = req_id
else:
self.req_id = 0 if env is None else randint(0x00000000, 0xFFFFFFFF)
def error(self, message, error=500, exc=None, detail=None):
return Error(message, error, exc, self.path, self.req_id, detail=detail)
class ObjectReq(Object):
def __init__(self, req):
Object.__init__(self)
self.req = req
def __str__(self):
return "%s(req=%s)" % (type(self).__name__, type(self.req).__name__)
class NoAuthenticator(ObjectReq):
def __init__(self, req):
ObjectReq.__init__(self, req)
def authenticate (self, env, args):
return "anybody" # or None
def authorize(self, env, client, path, method):
return (client is not None)
class X509Authenticator(NoAuthenticator):
def __init__(self, req, db):
NoAuthenticator.__init__(self, req)
self.db = db
def __str__(self):
return "%s(req=%s, db=%s)" % (type(self).__name__, type(self.req).__name__, type(self.db).__name__)
def get_cert_dns_names(self, pem):
cert = M2Crypto.X509.load_cert_string(pem)
subj = cert.get_subject()
commons = [n.get_data().as_text() for n in subj.get_entries_by_nid(subj.nid["CN"])]
ext = cert.get_ext("subjectAltName")
extstrs = [val.strip() for val in ext.get_value().split(",")]
altnames = [val[4:] for val in extstrs if val.startswith("DNS:")]
# bit of mangling to get rid of duplicates and leave commonname first
firstcommon = commons[0]
return [firstcommon] + list(set(altnames+commons) - set([firstcommon]))
def authenticate (self, env, args):
try:
cert_names = self.get_cert_dns_names(env["SSL_CLIENT_CERT"])
except:
logging.info("authenticate: cannot get or parse certificate from env")
return None
identity = args.get("client", [None])[0]
secret = args.get("secret", [None])[0]
client = self.db.get_client_by_name(cert_names, identity, secret)
if not client:
logging.info("authenticate: client not found by identity: \"%s\", secret: %s, cert_names: %s" % (
identity, "..." if secret else "None", str(cert_names)))
return None
        # Clients with 'secret' set must get authorized by it.
# No secret turns auth off for this particular client.
if client.secret is not None and secret is None:
logging.info("authenticate: missing secret argument")
return None
logging.info("authenticate: %s" % str(client))
return client
def authorize(self, env, client, path, method):
if method.debug:
if not client.debug:
logging.info("authorize: failed, client does not have debug enabled")
return None
return client
if method.read:
if not client.read:
logging.info("authorize: failed, client does not have read enabled")
return None
return client
if method.write:
if not (client.write or client.test):
logging.info("authorize: failed, client is not allowed to write or test")
return None
return client
class NoValidator(ObjectReq):
def __init__(self, req):
ObjectReq.__init__(self, req)
def __str__(self):
return "%s(req=%s)" % (type(self).__name__, type(self.req).__name__)
def check(self, event):
return []
class JSONSchemaValidator(NoValidator):
def __init__(self, req, filename=None):
NoValidator.__init__(self, req)
self.path = filename or path.join(path.dirname(__file__), "idea.schema")
with open(self.path) as f:
self.schema = json.load(f)
self.validator = Draft4Validator(self.schema, format_checker=FormatChecker())
def __str__(self):
return "%s(req=%s, filename=\"%s\")" % (type(self).__name__, type(self.req).__name__, self.path)
def check(self, event):
def sortkey(k):
""" Treat keys as lowercase, prefer keys with less path segments """
return (len(k.path), "/".join(str(k.path)).lower())
res = []
for error in sorted(self.validator.iter_errors(event), key=sortkey):
res.append(
"Validation error: key \"%s\", value \"%s\", expected - %s" % (
u"/".join(str(v) for v in error.path),
error.instance,
error.schema.get('description', 'no additional info')))
return res
class MySQL(ObjectReq):
def __init__(self, req, host, user, password, dbname, port, catmap_filename, tagmap_filename):
ObjectReq.__init__(self, req)
self.host = host
self.user = user
self.password = password
self.dbname = dbname
self.port = port
self.catmap_filename = catmap_filename
self.tagmap_filename = tagmap_filename
with open(catmap_filename, "r") as catmap_fd:
self.catmap = json.load(catmap_fd)
self.catmap_other = self.catmap["Other"] # Catch error soon, avoid lookup later
with open(tagmap_filename, "r") as tagmap_fd:
self.tagmap = json.load(tagmap_fd)
            self.tagmap_other = self.tagmap["Other"] # Catch error soon, avoid lookup later
self.con = my.connect(host=self.host, user=self.user, passwd=self.password,
db=self.dbname, port=self.port, cursorclass=mycursors.DictCursor)
self.crs = self.con.cursor()
def __str__(self):
return "%s(req=%s, host='%s', user='%s', dbname='%s', port=%d, catmap_filename=\"%s\", tagmap_filename=\"%s\")" % (
type(self).__name__, type(self.req).__name__, self.host, self.user, self.dbname, self.port, self.catmap_filename, self.tagmap_filename)
def _get_comma_perc(self, l):
return ','.join(['%s'] * len(l))
def _get_not(self, b):
return "" if b else "NOT"
def get_client_by_name(self, cert_names, identity=None, secret=None):
query = ["SELECT id, registered, requestor, hostname, service, note, identity, secret, `read`, debug, `write`, test FROM clients WHERE valid = 1"]
params = []
if identity:
query.append(" AND identity = %s")
params.append(identity)
if secret:
query.append(" AND secret = %s")
params.append(secret)
query.append(" AND hostname IN (%s)" % self._get_comma_perc(cert_names))
params.extend(cert_names)
self.crs.execute("".join(query), params)
rows = self.crs.fetchall()
if len(rows)>1:
logging.warn("get_client_by_name: query returned more than one result: %s" % ", ".join(
[str(Client(**row)) for row in rows]))
return None
return Client(**rows[0]) if rows else None
def get_debug(self):
self.crs.execute("SELECT VERSION() AS VER")
row = self.crs.fetchone()
self.crs.execute("SHOW TABLE STATUS")
tablestat = self.crs.fetchall()
return {
"db": "MySQL",
"version": row["VER"],
"tables": tablestat
}
def getMaps(self, section, variables):
maps = []
for v in variables:
try:
mapped = section[v]
except KeyError:
raise self.req.error("Wrong tag or category used in query.", 422,
sys.exc_info(), detail={"key": v})
maps.append(mapped)
return set(maps) # unique
def fetch_events(self, client, id, count,
cat=None, nocat=None,
tag=None, notag=None,
group=None, nogroup=None):
logging.debug("fetch_events: id=%i, count=%i, cat=%s, nocat=%s, tag=%s, notag=%s, group=%s, nogroup=%s" % (id, count, str(cat), str(nocat), str(tag), str(notag), str(group), str(nogroup)))
if cat and nocat:
raise self.req.error("Unrealizable conditions. Choose cat or nocat option.", 422,
detail={'cat': cat, 'nocat' : nocat})
if tag and notag:
raise self.req.error("Unrealizable conditions. Choose tag or notag option.", 422,
                detail={'tag': tag, 'notag' : notag})
if group and nogroup:
raise self.req.error("Unrealizable conditions. Choose group or nogroup option.", 422,
                detail={'group': group, 'nogroup' : nogroup})
query = ["SELECT e.id, e.data FROM clients c RIGHT JOIN events e ON c.id = e.client_id WHERE e.id > %s"]
params = [id or 0]
if cat or nocat:
cats = self.getMaps(self.catmap, (cat or nocat))
query.append(
" AND e.id %s IN (SELECT event_id FROM event_category_mapping WHERE category_id IN (%s))" % (
self._get_not(cat), self._get_comma_perc(cats)))
params.extend(cats)
if tag or notag:
tags = self.getMaps(self.tagmap, (tag or notag))
query.append(
" AND e.id %s IN (SELECT event_id FROM event_tag_mapping WHERE tag_id IN (%s))" % (
self._get_not(tag), self._get_comma_perc(tags)))
params.extend(tags)
if group or nogroup:
subquery = []
for identity in (group or nogroup):
subquery.append("c.identity = %s") # exact client
params.append(identity)
subquery.append("c.identity LIKE %s") # whole subtree
params.append(identity + ".%")
query.append(" AND %s (%s)" % (self._get_not(group), " OR ".join(subquery)))
query.append(" AND e.valid = 1 LIMIT %s")
params.append(count)
query_string = "".join(query)
logging.debug("fetch_events: query - %s" % query_string)
logging.debug("fetch_events: params - %s", str(params))
self.crs.execute(query_string, params)
row = self.crs.fetchall()
if row:
maxid = max(r['id'] for r in row)
else:
maxid = self.getLastEventId()
events = [json.loads(r["data"]) for r in row]
return {
"lastid": maxid,
"events": events
}
def store_event(self, client, event):
try:
self.crs.execute("INSERT INTO events (received,client_id,data) VALUES (NOW(), %s, %s)", (client.id, json.dumps(event)))
lastid = self.crs.lastrowid
catlist = event.get('Category', ["Other"])
cats = set(catlist) | set(cat.split(".", 1)[0] for cat in catlist)
for cat in cats:
cat_id = self.catmap.get(cat, self.catmap_other)
self.crs.execute("INSERT INTO event_category_mapping (event_id,category_id) VALUES (%s, %s)", (lastid, cat_id))
try:
tags = event['Node'][0]['Tags']
except (KeyError, IndexError):
tags = []
for tag in tags:
tag_id = self.tagmap.get(tag, self.tagmap_other)
self.crs.execute("INSERT INTO event_tag_mapping (event_id,tag_id) VALUES (%s, %s)", (lastid, tag_id))
self.con.commit()
return []
except Exception as e:
self.con.rollback()
return [type(e).__name__ + ": " + str(e)]
def insertLastReceivedId(self, client, id):
logging.debug("insertLastReceivedId: id %i for client %i(%s)" % (id, client.id, client.hostname))
self.crs.execute("INSERT INTO last_events(client_id, event_id, timestamp) VALUES(%s, %s, NOW())", (client.id, id))
self.con.commit()
def getLastEventId(self):
self.crs.execute("SELECT MAX(id) as id FROM events")
row = self.crs.fetchone()
return row['id'] or 0
def getLastReceivedId(self, client):
self.crs.execute("SELECT MAX(event_id) as id FROM last_events WHERE client_id = %s", client.id)
row = self.crs.fetchone()
        id = (row['id'] or 0) if row is not None else 0
logging.debug("getLastReceivedId: id %i for client %i(%s)" % (id, client.id, client.hostname))
return id
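# Decorator for handler methods: marks the method as exposed to the WSGI
# dispatcher and records which access flags (read/write/debug) the
# authenticator checks in authorize().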
def expose(read=1, write=0, debug=0):
def expose_deco(meth):
meth.exposed = True
meth.read = read
meth.write = write
meth.debug = debug
return meth
return expose_deco
class Server(ObjectReq):
def __init__(self, req, auth, handler):
ObjectReq.__init__(self, req)
self.auth = auth
self.handler = handler
def __str__(self):
return "%s(req=%s, auth=%s, handler=%s)" % (type(self).__name__, type(self.req).__name__, type(self.auth).__name__, type(self.handler).__name__)
def sanitize_args(self, path, func, args, exclude=["self"]):
# silently remove internal args, these should never be used
# but if somebody does, we do not expose them by error message
intargs = set(args).intersection(exclude)
for a in intargs:
del args[a]
if intargs:
logging.info("sanitize_args: Called with internal args: %s" % ", ".join(intargs))
# silently remove surplus arguments - potential forward
# compatibility (unknown args will get ignored)
badargs = set(args) - set(func.func_code.co_varnames[0:func.func_code.co_argcount])
for a in badargs:
del args[a]
if badargs:
logging.info("sanitize_args: Called with superfluous args: %s" % ", ".join(badargs))
return args
def wsgi_app(self, environ, start_response, exc_info=None):
path = environ.get("PATH_INFO", "").lstrip("/")
self.req.reset(env=environ, path=path)
output = ""
status = "200 OK"
headers = [('Content-type', 'application/json')]
exception = None
try:
try:
injson = environ['wsgi.input'].read()
except:
raise self.req.error("Data read error.", 408, sys.exc_info())
try:
method = getattr(self.handler, path)
method.exposed # dummy access to trigger AttributeError
except Exception:
raise self.req.error("You've fallen of the cliff.", 404)
self.req.args = args = parse_qs(environ.get('QUERY_STRING', ""))
self.req.client = client = self.auth.authenticate(environ, args)
if not client:
raise self.req.error("I'm watching. Authenticate.", 403)
try:
events = json.loads(injson) if injson else None
except Exception as e:
raise self.req.error("Deserialization error.", 400,
sys.exc_info(), detail={"args": injson, "parser": str(e)})
if events:
args["events"] = events
auth = self.auth.authorize(self.req.env, self.req.client, self.req.path, method)
if not auth:
raise self.req.error("I'm watching. Not authorized.", 403, detail={"client": client.identity})
# These args are not for handler
args.pop("client", None)
args.pop("secret", None)
args = self.sanitize_args(path, method, args)
result = method(**args) # call requested method
try:
# 'default': takes care of non JSON serializable objects,
# which could (although shouldn't) appear in handler code
output = json.dumps(result, default=lambda v: str(v))
except Exception as e:
raise self.req.error("Serialization error", 500,
sys.exc_info(), detail={"args": str(result)})
except Error as e:
exception = e
except Exception as e:
exception = self.req.error("Server exception", 500, sys.exc_info())
if exception:
status = "%d %s" % (exception.error, exception.message)
result = exception.to_dict()
try:
output = json.dumps(result, default=lambda v: str(v))
except Exception as e:
# Here all bets are off, generate at least sane output
output = '{"error": %d, "message": "%s"}' % (
exception.error, exception.message)
logging.error(str(exception))
i = exception.info_str()
if i:
logging.info(i)
d = exception.debug_str()
if d:
logging.debug(d)
headers.append(('Content-Length', str(len(output))))
start_response(status, headers)
self.req.reset()
return [output]
__call__ = wsgi_app
class WardenHandler(ObjectReq):
def __init__(self, req, validator, db, auth,
send_events_limit=100000, get_events_limit=100000,
description=None):
ObjectReq.__init__(self, req)
self.auth = auth
self.db = db
self.validator = validator
self.send_events_limit = send_events_limit
self.get_events_limit = get_events_limit
self.description = description
def __str__(self):
return "%s(req=%s, validator=%s, db=%s, send_events_limit=%s, get_events_limit=%s, description=\"%s\")" % (
type(self).__name__, type(self.req).__name__, type(self.validator).__name__, type(self.db).__name__,
            self.send_events_limit, self.get_events_limit, self.description)
@expose(read=1, debug=1)
def getDebug(self):
return {
"environment": self.req.env,
"client": self.req.client.__dict__,
"database": self.db.get_debug(),
"system": {
"uname": os.uname()
},
"process": {
"cwd": os.getcwdu(),
"pid": os.getpid(),
"ppid": os.getppid(),
"pgrp": os.getpgrp(),
"uid": os.getuid(),
"gid": os.getgid(),
"euid": os.geteuid(),
"egid": os.getegid(),
"groups": os.getgroups()
}
}
@expose(read=1)
def getInfo(self):
info = {
"version": VERSION,
"send_events_limit": self.send_events_limit,
"get_events_limit": self.get_events_limit
}
if self.description:
info["description"] = self.description
return info
@expose(read=1)
def getEvents(self, id=None, count=None,
cat=None, nocat=None,
tag=None, notag=None,
group=None, nogroup=None):
try:
id = int(id[0])
except (ValueError, TypeError, IndexError):
id = None
if id is None:
try:
id = self.db.getLastReceivedId(self.req.client)
            except Exception as e:
logging.info("cannot getLastReceivedId - " + type(e).__name__ + ": " + str(e))
if id is None:
            # First access, remember the client and hand it the last event id
id = self.db.getLastEventId()
self.db.insertLastReceivedId(self.req.client, id)
return {
"lastid": id,
"events": []
}
try:
count = int(count[0])
except (ValueError, TypeError, IndexError):
count = self.get_events_limit
if self.get_events_limit:
count = min(count, self.get_events_limit)
res = self.db.fetch_events(self.req.client, id, count, cat, nocat, tag, notag, group, nogroup)
self.db.insertLastReceivedId(self.req.client, res['lastid'])
logging.info("sending %d events, lastid is %i" % (len(res["events"]), res["lastid"]))
return res
def checkNode(self, event, identity):
try:
ev_id = event['Node'][0]['Name'].lower()
except (KeyError, TypeError):
# Event does not bear valid Node attribute
return ["Event does not bear valid Node attribute"]
if ev_id != identity:
return ["Node does not correspond with saving client"]
return []
@expose(write=1)
def sendEvents(self, events=[]):
if not isinstance(events, list):
raise self.req.error("List of events expected.", 400)
if len(events)>self.send_events_limit:
raise self.req.error("Too much events in one batch.", 413,
detail={"limit": self.send_events_limit})
saved = 0
errs = {}
for i, event in enumerate(events):
v_errs = self.validator.check(event)
if v_errs:
errs[i] = v_errs
continue
node_errs = self.checkNode(event, self.req.client.identity)
if node_errs:
errs[i] = node_errs
continue
if self.req.client.test and not 'Test' in event.get('Category', []):
errs[i] = ["You're allowed to send only messages, containing \"Test\" among categories."]
continue
db_errs = self.db.store_event(self.req.client, event)
if db_errs:
errs[i] = db_errs
continue
saved += 1
logging.info("Saved %i events" % saved)
if errs:
raise self.req.error("Errors saving some messages.", 422,
detail={"errors": errs})
return saved
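# Read INI style configuration into a dict of dicts, with section names
# lowercased so that lookups in build_server() do not depend on case.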
def read_ini(path):
c = ConfigParser.RawConfigParser()
res = c.read(path)
if not res or not path in res:
        # We don't have logging yet, hopefully this will go into webserver log
raise Error("Unable to read config: %s" % path)
data = {}
for sect in c.sections():
for opts in c.options(sect):
lsect = sect.lower()
if not lsect in data:
data[lsect] = {}
data[lsect][opts] = c.get(sect, opts)
return data
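# Read JSON-like configuration (full-line comments starting with "#" are
# stripped) and lowercase all section and option names.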
def read_cfg(path):
with open(path, "r") as f:
stripcomments = "\n".join((l for l in f if not l.lstrip().startswith("#")))
conf = json.loads(stripcomments)
# Lowercase keys
conf = dict((sect.lower(), dict(
(subkey.lower(), val) for subkey, val in subsect.iteritems())
) for sect, subsect in conf.iteritems())
return conf
def fallback_wsgi(environ, start_response, exc_info=None):
# If server does not start, set up simple server, returning
# Warden JSON compliant error message
error=503
message="Server not running due to initialization error"
headers = [('Content-type', 'application/json')]
logline = "Error(%d): %s" % (error, message)
status = "%d %s" % (error, message)
output = '{"error": %d, "message": "%s"}' % (
error, message)
logging.critical(logline)
start_response(status, headers)
return [output]
def build_server(conf):
# Functions for validation and conversion of config values
def facility(name):
return int(getattr(logging.handlers.SysLogHandler, "LOG_" + name.upper()))
def loglevel(name):
return int(getattr(logging, name.upper()))
def natural(name):
num = int(name)
if num<1:
raise ValueError("Not a natural number")
return num
def filepath(name):
# Make paths relative to dir of this script
return path.join(path.dirname(__file__), name)
def objdef(name):
return objects[name.lower()]
obj = objdef # Draw into local namespace for init_obj
objects = {} # Already initialized objects
# List of sections and objects, configured by them
# First object in each object list is the default one, otherwise
# "type" keyword in section may be used to choose other
section_def = {
"log": ["FileLogger", "SysLogger"],
"db": ["MySQL"],
"auth": ["X509Authenticator", "NoAuthenticator"],
"validator": ["JSONSchemaValidator", "NoValidator"],
"handler": ["WardenHandler"],
"server": ["Server"]
}
# Object parameter conversions and defaults
param_def = {
"FileLogger": {
"req": {"type": obj, "default": "req"},
"filename": {"type": filepath, "default": path.join(path.dirname(__file__), path.splitext(path.split(__file__)[1])[0] + ".log")},
"level": {"type": loglevel, "default": "info"},
},
"SysLogger": {
"req": {"type": obj, "default": "req"},
"socket": {"type": filepath, "default": "/dev/log"},
"facility": {"type": facility, "default": "daemon"},
"level": {"type": loglevel, "default": "info"}
},
"NoAuthenticator": {
"req": {"type": obj, "default": "req"}
},
"X509Authenticator": {
"req": {"type": obj, "default": "req"},
"db": {"type": obj, "default": "db"}
},
"NoValidator": {
"req": {"type": obj, "default": "req"},
},
"JSONSchemaValidator": {
"req": {"type": obj, "default": "req"},
"filename": {"type": filepath, "default": path.join(path.dirname(__file__), "idea.schema")}
},
"MySQL": {
"req": {"type": obj, "default": "req"},
"host": {"type": str, "default": "localhost"},
"user": {"type": str, "default": "warden"},
"password": {"type": str, "default": ""},
"dbname": {"type": str, "default": "warden3"},
"port": {"type": natural, "default": 3306},
"catmap_filename": {"type": filepath, "default": path.join(path.dirname(__file__), "catmap_mysql.json")},
"tagmap_filename": {"type": filepath, "default": path.join(path.dirname(__file__), "tagmap_mysql.json")}
},
"WardenHandler": {
"req": {"type": obj, "default": "req"},
"validator": {"type": obj, "default": "validator"},
"db": {"type": obj, "default": "DB"},
"auth": {"type": obj, "default": "auth"},
"send_events_limit": {"type": natural, "default": 10000},
"get_events_limit": {"type": natural, "default": 10000},
"description": {"type": str, "default": ""}
},
"Server": {
"req": {"type": obj, "default": "req"},
"auth": {"type": obj, "default": "auth"},
"handler": {"type": obj, "default": "handler"}
}
}
def init_obj(sect_name):
config = conf.get(sect_name, {})
sect_name = sect_name.lower()
sect_def = section_def[sect_name]
try: # Object type defined?
objtype = config["type"]
del config["type"]
except KeyError: # No, fetch default object type for this section
objtype = sect_def[0]
else:
if not objtype in sect_def:
raise KeyError("Unknown type %s in section %s" % (objtype, sect_name))
params = param_def[objtype]
# No surplus parameters? Disallow also 'obj' attributes, these are only
# to provide default referenced section
for name in config:
if name not in params or (name in params and params[name]["type"] is objdef):
raise KeyError("Unknown key %s in section %s" % (name, sect_name))
# Process parameters
kwargs = {}
for name, definition in params.iteritems():
raw_val = config.get(name, definition["default"])
try:
val = definition["type"](raw_val)
except Exception:
raise KeyError("Bad value \"%s\" for %s in section %s" % (raw_val, name, sect_name))
kwargs[name] = val
cls = globals()[objtype] # get class/function type
try:
obj = cls(**kwargs) # run it
except Exception as e:
raise KeyError("Cannot initialize %s from section %s: %s" % (
objtype, sect_name, str(e)))
if isinstance(obj, Object):
# Log only objects here, functions must take care of themselves
logging.info("Initialized %s" % str(obj))
objects[sect_name] = obj
return obj
# Init logging with at least simple stderr StreamLogger
# Dunno if it's ok within wsgi, but we have no other choice, let's
# hope it at least ends up in webserver error log
StreamLogger()
# Shared container for common data of ongoing WSGI request
objects["req"] = Request()
try:
# Now try to init required objects
for o in ("log", "db", "auth", "validator", "handler", "server"):
init_obj(o)
except Exception as e:
logging.critical(str(e))
logging.debug("", exc_info=sys.exc_info())
return fallback_wsgi
logging.info("Ready to serve")
return objects["server"]
if __name__=="__main__":
# FIXME: just development stuff
srv = build_server(read_ini("warden3.cfg.wheezy-warden3"))
BSD License
Copyright © 2011-2015 Cesnet z.s.p.o
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Cesnet z.s.p.o nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE Cesnet z.s.p.o BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+---------------------------------+
| Warden Client Library 3.0-beta3 |
+---------------------------------+
Content
A. Introduction
B. Quick start (TL;DR)
C. Concepts
D. HTTP/JSON API
E. Python API
------------------------------------------------------------------------------
A. Introduction
   The main goal of Warden 3 is to address the shortcomings that emerged
during several years of Warden 2.X operation. Warden 3 uses a flexible,
descriptive event format based on JSON. The Warden 3 protocol consists of
plain HTTPS queries carrying JSON (Warden 2 SOAP is heavyweight, outdated
and draws in many dependencies). Clients can be written in many languages,
because unlike SOAP, plain HTTPS and JSON are mature in all mainstream
programming languages. The server is written in Python - a mature language
with consistent, coherent libraries and many skilled developers.
------------------------------------------------------------------------------
B. Quick start (TL;DR)
 * Obtain an X509 key/cert pair corresponding to the DNS name of your machine.
* Obtain X509 CA chain for server validation.
* Choose client name ("reverse DNS", like org.example.warden.client, but
   it does not necessarily need to correspond to your machine's DNS name).
* Ask Warden server admins for registration. They will want to know at least
   the client name and DNS name, and a short description of the (planned) client
and its purpose. Work with them. They may request some changes or
clarifications, offer you useful guidelines, provide you with alternative
sandbox URL, etc.
   If successful, you will receive an authentication secret.
 * Use warden_curl_test.sh to check that you are able to talk to the server.
* See warden_client_examples.py on how to integrate sending/receiving
into your Python application.
* Alternatively, check 'contrib' directory in Warden GIT for various
ready to use tools or recipes. You may find senders for various
honeypots, or warden_filer may get handy if you do not want to delve
into Python at all.
 * Welcome! Thanks for participating in the data exchange to improve
network security awareness.
------------------------------------------------------------------------------
C. Concepts
C.1. Event description format
IDEA - Intrusion Detection Extensible Alert, flexible extensible format
for security events, see:
https://idea.cesnet.cz/
C.2. Event serial ID
   Each received event gets assigned an integer serial number. These numbers
are sequential, so each recipient can keep track of the last event "id" it
received and next time ask only for the following events.
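   As an illustration only (the Python client described in section E can do
this bookkeeping for you through its "idstore" parameter), the principle is:

     # Sketch: remember the highest "lastid" returned by getEvents and pass
     # it as "id" in the next request; the file name is made up.
     def load_last_id(path="warden_client.id"):
         try:
             with open(path) as f:
                 return int(f.read())
         except (IOError, ValueError):
             return None             # not known yet - let the server decide

     def store_last_id(last_id, path="warden_client.id"):
         with open(path, "w") as f:
             f.write(str(last_id))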
C.3. Authentication
   In Warden 2, clients are authenticated by an X509 certificate; however,
the certificate is usually the same for the whole machine, so individual
clients are differentiated only by stating their own name. As client names
are widely known, this allows for client impersonation within one machine.
   Warden 3 slightly improves this scheme by replacing the client name in
the authentication phase with a "secret" - a random string shared between
the particular client and the server - which makes it harder to forge a
client identity (be it by mistake or intentionally).
   However, the best solution for these cases is of course a specific
certificate for each particular client (which is also fully supported).
   The client also has to have the server CA certificate (or chain) at its
disposal to be able to verify the server's authenticity.
C.4. Client name
   Unlike Warden 2, client names in Warden 3 are hierarchical. Modelled after
Java class names, a client name is a dot-separated list of labels, with
significance from left to right - the leftmost label denoting the largest
containing realm, the rightmost one denoting a single entity.
   A country.organisation.suborganisation.machine.local-realm scheme akin to
"org.example.csirt.northwest.honeypot.jabberwock" is strongly recommended.
   Label case is significant; a label can contain only letters, numbers or
underscores and must not start with a number.
   The reason is the possibility to filter incoming events based not only on
a particular client, or on the (for some recipients flawed) notion of "own"
messages, but also on wider units (see the sketch below).
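   A minimal sketch of that naming rule as a Python regular expression (an
illustration of the convention only, not part of the Warden API; ASCII
letters are assumed):

     import re

     LABEL = r"[A-Za-z_][A-Za-z0-9_]*"     # letters, digits, underscore; no leading digit
     CLIENT_NAME = re.compile(r"^%s(\.%s)*$" % (LABEL, LABEL))

     CLIENT_NAME.match("org.example.csirt.northwest.honeypot.jabberwock")   # matches
     CLIENT_NAME.match("org.example.1honeypot")                             # does not match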
------------------------------------------------------------------------------
D. HTTP/JSON API
   The client must know the base URL of the Warden server. Warden 3 accepts
queries on paths under the base URL (which correspond to the called method),
with the usual query string variable=value pairs, separated by ampersands, as
arguments. Multivalues are specified by repeating the same variable several
times, once with each value.
https://warden.example.org/warden3/getEvents?secret=PwD&cat=Abusive.Spam&cat=Fraud.Phishing
\________________ _______________/ \___ ___/ \____ ___/ \______ _______/ \________ _______/
V V V V V
Base URL --' | | | |
Called method ------------------------' | | |
Key/value pair -----------------------------------' | |
Multivalue ------------------------------------------------'------------------'
   A method may expect bulk data (events to save, for example) - the query
then must be a POST, with JSON POST data formed appropriately as documented
for the particular method.
   If the HTTPS call succeeds (200 OK), the method returns a JSON object
containing the requested data.
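   For illustration, a query string like the one above can be built in Python
as follows (the Python client from section E does this internally; the values
are made up):

     from urllib import urlencode       # Python 3: from urllib.parse import urlencode

     args = {"secret": "PwD", "count": 10,
             "cat": ["Abusive.Spam", "Fraud.Phishing"]}
     url = ("https://warden.example.org/warden3/getEvents?"
            + urlencode(args, doseq=True))   # doseq=True repeats multivalued variables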
D.1. Error handling
   Should the call fail, the server returns an HTTP status code together with
a JSON object describing the errors (there may be multiple ones, especially
when sending events). The keys of the object, which may be present, are:
* method - name of the method called
 * req_id - unique identifier of the request (for troubleshooting; the Warden
     administrator can use it to uniquely identify related log lines)
* errors - always present list of JSON objects, which contain:
* error - HTTP status code
* message - human readable error description
* Other context dependent fields may appear, see particular method
description.
   Client errors (4xx) are considered permanent - the client must not try to
send the same event again, as it will always get rejected - the client
administrator will need to inspect the logs and rectify the cause.
   Server errors (5xx) may be considered temporary, and the client is advised
to try again after a reasonable pause (a minimal sketch of this policy
follows).
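   A minimal sketch of that retry policy (illustrative only; the Python client
from section E implements it for sendEvents through its "retry" and "pause"
parameters):

     import time

     def call_with_retry(call, retries=3, pause=5):
         """ call() returns (http_status, parsed_json); retries >= 1 assumed. """
         for attempt in range(retries):
             status, result = call()
             if status < 500:        # success or permanent 4xx error - do not repeat
                 break
             if attempt < retries - 1:
                 time.sleep(pause)   # temporary 5xx error - wait and try again
         return status, result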
D.2. Common arguments
* secret - shared secret, assigned to client during registration
* client - client name, optional, can be used to mimic Warden 2
authentication behavior if explicitly allowed for this client by server
administrator
= getEvents =
Fetches events from server.
Arguments:
* count - number of requested events
* id - starting serial number requested, id of all received events will
be greater
* cat, nocat - selects only events with categories, which are/are not
present in the event Category field (mutually exclusive)
* group, nogroup - selects only events originated/not originated from
     these realms and/or client names, as denoted in the event Node.Name field
(mutually exclusive)
 * tag, notag - selects only events with/without these client description
tags, as denoted in the event Node.Type field (mutually exclusive)
Returns:
* lastid - serial number of the last received event
* events - array of Idea events
Example:
$ curl \
--key key.pem \
--cert cert.pem \
--cacert ca.pem \
--connect-timeout 3 \
--request POST \
\
"https://warden.example.org/getEvents?\
secret=SeCrEt\
&count=1\
&nogroup=org.example\
&cat=Abusive.Spam\
&cat=Fraud.Phishing"
{"lastid": 581,
"events": [{
"Format": "IDEA0",
"DetectTime": "2015-02-03T09:55:21.563638Z",
"Target": [{"URL": ["http://example.com/kocHq"]}],
"Category": ["Fraud.Phishing"],
"Note": "Example event"}]}
= sendEvents =
Uploads events to server.
Arguments:
* POST data - JSON array of Idea events
Returns:
   Returns an object with the number of saved messages in the "saved"
attribute.
   In case of error, multiple errors may be returned in the "errors" list (see
the [[Warden3#Error-handling|Error handling]] section). Each of the error
objects may contain an "events" key with a list of indexes of the events
affected by this particular error. If there is an error object without an
"events" key, the caller must consider all events affected.
   Should the call fail because of errors in just a couple of events, the
error message will contain a JSON object in the "detail.errors" section. The
keys of the object are indexes into the POST data array; the values are error
messages for each particular failed Idea event.
Example:
$ eventid=$RANDOM$RANDOM$RANDOM$RANDOM$RANDOM
$ detecttime=$(date --rfc-3339=seconds|tr " " "T")
$ client="cz.example.warden.test"
$ printf '
[
{
"Format": "IDEA0",
"ID": "%s",
"DetectTime": "%s",
"Category": ["Test"],
"Node": [{"Name": "%s"}]
}
]' $eventid $detecttime $client |\
curl \
--key $keyfile \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
--request POST \
--data-binary "@-" \
"https://warden.example.org/sendEvents?client=$client&secret=SeCrEt"
{"saved":1}
(However note that this is not the best way to generate Idea messages. :) )
Example with error:
$ curl \
--key $keyfile \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
--request POST \
--data-binary '[{"Format": "IDEA0","ID":"ASDF","Category":[],"DetectTime":"asdf"}]' \
"https://warden.example.org/sendEvents?client=cz.example.warden.test&secret=SeCrEt"
{"errors":
[
{"message": "Validation error: key \"DetectTime\", value \"asdf\", expected - RFC3339 timestamp.",
"events": [0],
"error": 460
}
],
"method": "sendEvents",
"req_id": 3726454025
}
= getInfo =
Returns basic server information.
Returns:
* version - Warden server version string
* description - server greeting
 * send_events_limit - sendEvents will be rejected if the client sends more
     events than this in one call
 * get_events_limit - getEvents will return at most this many events
Example:
$ curl \
--key key.pem \
--cert cert.pem \
--cacert ca.pem \
--connect-timeout 3 \
--request POST \
"https://warden.example.org/getInfo?secret=SeCrEt"
{"version": "3.0",
"send_events_limit": 500,
"get_events_limit": 1000,
"description": "Warden 3 server"}
E. Python API
   The Python API abstracts away the raw HTTPS/URL/JSON details. The user
instantiates the Client class with the necessary settings (certificates,
secret, client name, logging, limits, ...) and then uses its methods to
access the server.
= Client constructor =
wclient = warden.Client(
url,
certfile=None,
keyfile=None,
cafile=None,
timeout=60,
retry=3,
pause=5,
get_events_limit=6000,
send_events_limit=500,
errlog={},
syslog=None,
filelog=None,
idstore=None,
name="org.example.warden_client",
secret=None)
* url - Warden server base URL
* certfile, keyfile, cafile - paths to X509 material
* timeout - network timeout value in seconds
 * retry - number of retries on transitional errors during sending events
* pause - wait time in seconds between transitional error retries
* get_events_limit - maximum number of events to receive (note that server
may have its own notion)
* send_events_limit - when sending, event lists will be split and sent by
chunks of at most this size (note that used value will get adjusted according
to the limit reported by server)
* errlog - stderr logging configuration dict
* level - most verbose loglevel to log
* syslog - syslog logging configuration dict
* level - most verbose loglevel to log
* socket - syslog socket path (defaults to "/dev/log")
* facility - syslog facility (defaults to "local7")
* filelog - file logging configuration dict
* level - most verbose loglevel to log
* file - path to log file
 * idstore - path to a simple text file in which the last received event ID
     gets stored. If None, the server's notion is used
* name - client name
* secret - authentication secret
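   Usage example (illustrative values; "warden" stands for the imported client
module, as in the rest of this section):

     wclient = warden.Client(
         url="https://warden.example.org/warden3",
         certfile="cert.pem",
         keyfile="key.pem",
         cafile="ca.pem",
         idstore="warden_client.id",
         name="org.example.warden.test",
         secret="SeCrEt")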
= Configuration file helper =
warden.read_cfg(cfgfile)
   The Warden object can be initialized from a JSON-like configuration file.
It is essentially JSON, but full-line comments starting with "#" or "//" are
allowed. read_cfg reads the configuration file and returns a dict suitable
for passing as the Client constructor arguments.
Usage example:
wclient = warden.Client(**warden.read_cfg("warden_client.cfg"))
= warden.Client.getEvents =
wclient.getEvents(
id=None,
idstore=None,
count=1,
cat=None, nocat=None,
tag=None, notag=None,
group=None, nogroup=None)
* id - can be used to explicitly override value from idstore file
* idstore - can be used to explicitly override idstore for this request
* count - number of requested events
* cat, nocat - selects only events with categories, which are/are not
present in the event Category field (mutually exclusive)
* group, nogroup - selects only events originated/not originated from
     these realms and/or client names, as denoted in the event Node.Name field
(mutually exclusive)
 * tag, notag - selects only events with/without these client description
tags, as denoted in the event Node.Type field (mutually exclusive)
Returns:
* list of Idea events
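   Usage example (a sketch; the category names are illustrative). On error an
Error instance is returned, which iterates as empty, so the loop is safe
either way:

     events = wclient.getEvents(count=10, cat=["Abusive.Spam", "Fraud.Phishing"])
     for event in events:
         process(event)              # your own handling of one Idea event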
= warden.Client.sendEvents =
wclient.sendEvents(self, events=[], retry=None, pause=None):
* events - list of Idea events to be sent to server
 * retry - use this retry value just for this call instead of the value from
     the constructor
 * pause - use this pause value just for this call instead of the value from
     the constructor
Returns:
* dict with number of sent events under "saved" key
Note:
   The events list length is limited only by available resources; sendEvents
will split it and send it in chunks of at most send_events_limit events
(note, however, that sendEvents will also need additional memory for its
internal data structures).
   Server errors (5xx) are considered transitional, and sendEvents will make
retry attempts to deliver the corresponding events, delayed by pause
seconds.
Should the call fail because of errors, particular errors may contain
"events" list. Values of the list are then indexes into POST data array. If
no "events" list is present, all events attempted to send must be
considered as failed (with this particular error). See also
[[Warden3#Error-handling|Error handling]] section.
Errors may also contain event IDs from Idea messages in "events_id" list.
This is primarily for logging - client administrator may identify offending
messages by stable identifiers.
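   Usage example (a sketch; the event is a minimal "Test" message with made-up
values, and Error is the class described below):

     from uuid import uuid4
     from time import strftime, gmtime

     event = {
         "Format": "IDEA0",
         "ID": str(uuid4()),
         "DetectTime": strftime("%Y-%m-%dT%H:%M:%SZ", gmtime()),
         "Category": ["Test"],
         "Node": [{"Name": "org.example.warden.test"}]
     }
     res = wclient.sendEvents([event])
     if isinstance(res, Error):
         print(str(res))             # formatted error description
     else:
         print(res)                  # e.g. {'saved': 1}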
= warden.Client.getInfo =
wclient.getInfo()
Returns dictionary of information from getInfo Warden call.
= Error class =
Error(
message,
logger=None,
error=None,
prio="error",
method=None,
req_id=None,
detail=None,
exc=None)
   The class which gets returned in case of a client or server error. The
caller can test whether it received data or an error by checking:
   isinstance(res, Error).
   However, if the caller does not want to deal with errors at all, the error
object also evaluates to False in a boolean context and acts as an empty
iterator - in the following examples do_stuff() is not evaluated:
if res:
do_stuff(res)
for e in res:
do_stuff(e)
   str(Error_Instance) outputs the formatted error; info_str() and
debug_str() output increasingly more detailed information.
------------------------------------------------------------------------------
Copyright (C) 2011-2022 Cesnet z.s.p.o
{
"url": "https://warden-hub.example.org/warden3",
"certfile": "cert.pem",
"keyfile": "key.pem",
"filelog": {"level": "debug"},
"name": "org.example.warden_client",
"secret": "ToP_SeCrEt"
}
#!/usr/bin/python #!/usr/bin/python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# #
# Copyright (C) 2011-2013 Cesnet z.s.p.o # Copyright (C) 2011-2015 Cesnet z.s.p.o
# Use of this source is governed by a 3-clause BSD-style license, see LICENSE file. # Use of this source is governed by a 3-clause BSD-style license, see LICENSE file.
import json, httplib, ssl, socket, logging, logging.handlers import hashlib # Some Python/ssl versions incorrectly initialize hashes, this helps
from urlparse import urlparse from sys import stderr, exc_info, version_info
from urllib import urlencode import json, ssl, socket, logging, logging.handlers, time
from sys import stderr, exc_info
from pprint import pformat
from traceback import format_tb from traceback import format_tb
from os import path from os import path
from operator import itemgetter
fix_logging_filename = str if version_info<(2, 7) else lambda x: x
if version_info[0] >= 3:
import http.client as httplib
from urllib.parse import urlparse
from urllib.parse import urlencode
basestring = str
else:
import httplib
from urlparse import urlparse
from urllib import urlencode
VERSION = "3.0-beta3"
DEFAULT_CA_STORES = [
"/etc/ssl/certs/ca-certificates.crt", # Deb
"/etc/pki/tls/certs/ca-bundle.crt", # RH
"/var/lib/ca-certificates/ca-bundle.pem" # SuSE
]
class HTTPSConnection(httplib.HTTPSConnection): class HTTPSConnection(httplib.HTTPSConnection):
''' '''
@@ -19,11 +39,12 @@ class HTTPSConnection(httplib.HTTPSConnection):
of SSL/ TLS version and cipher selection. See: of SSL/ TLS version and cipher selection. See:
http://hg.python.org/cpython/file/c1c45755397b/Lib/httplib.py#l1144 http://hg.python.org/cpython/file/c1c45755397b/Lib/httplib.py#l1144
and `ssl.wrap_socket()` and `ssl.wrap_socket()`
Used only if ssl.SSLContext is not available (Python version < 2.7.9)
''' '''
def __init__(self, host, **kwargs): def __init__(self, host, **kwargs):
self.ciphers = kwargs.pop('ciphers',None) self.ciphers = kwargs.pop('ciphers', None)
self.ca_certs = kwargs.pop('ca_certs',None) self.ca_certs = kwargs.pop('ca_certs', None)
self.ssl_version = kwargs.pop('ssl_version',ssl.PROTOCOL_SSLv23) self.ssl_version = kwargs.pop('ssl_version', getattr(ssl, "PROTOCOL_TLS", ssl.PROTOCOL_SSLv23))
httplib.HTTPSConnection.__init__(self,host,**kwargs) httplib.HTTPSConnection.__init__(self,host,**kwargs)
@@ -56,24 +77,72 @@ class Error(Exception):
Also, it can be raised as an exception. Also, it can be raised as an exception.
""" """
def __init__(self, message, logger=None, error=None, prio="error", method=None, def __init__(self, method=None, req_id=None, errors=None, **kwargs):
req_id=None, detail=None, exc=None):
self.errors = []
self.message = message if errors:
self.error = error self.extend(method, req_id, errors)
self.method = method if kwargs:
self.req_id = req_id self.append(method, req_id, **kwargs)
self.detail = detail
(self.exctype, self.excval, self.exctb) = exc or (None, None, None)
self.cause = self.excval # compatibility with other exceptions def append(self, method=None, req_id=None, **kwargs):
if logger: # We shift method and req_id into each and every error, because
getattr(logger, prio, "error")(str(self)) # we want to be able to simply merge more Error arrays (for
info = self.info_str() # returning errors from more Warden calls at once
if info: if method and not "method" in kwargs:
logger.info(info) kwargs["method"] = method
debug = self.debug_str() if req_id and not "req_id" in kwargs:
if debug: kwargs["req_id"] = req_id
logger.debug(debug) # Ugly, but be paranoid, don't rely on server reply to be well formed
try:
kwargs["error"] = int(kwargs["error"])
except Exception:
kwargs["error"] = 0
if "events" in kwargs:
evlist = kwargs["events"]
try:
evlist_new = []
for ev in evlist:
try:
evlist_new.append(int(ev))
except Exception:
pass
kwargs["events"] = evlist_new
except Exception:
kwargs["events"] = []
if "events_id" in kwargs:
try:
dummy = iter(kwargs["events_id"])
except TypeError:
kwargs["events_id"] = [None]*len(kwargs["events"])
if "send_events_limit" in kwargs:
try:
kwargs["send_events_limit"] = int(kwargs["send_events_limit"])
except Exception:
del kwargs["send_events_limit"]
if "exc" in kwargs:
# Traceback objects cause reference loops, so memory may be not
# correctly free'd. We only need traceback to log it in str_debug(),
# so let's get the string representation now and forget the
# traceback object, thus preventing the loop.
exctype, excvalue, tb = kwargs["exc"]
tb = format_tb(tb)
kwargs["exc"] = exctype, excvalue, tb
self.errors.append(kwargs)
def extend(self, method=None, req_id=None, iterable=[]):
try:
dummy = iter(iterable)
except TypeError:
iterable = [] # Bad joke from server
for e in iterable:
try:
args = dict(e)
except TypeError:
args = {} # Not funny!
self.append(method, req_id, **args)
def __len__ (self): def __len__ (self):
@@ -90,6 +159,8 @@ class Error(Exception):
""" In list or iterable context we're empty """ """ In list or iterable context we're empty """
raise StopIteration raise StopIteration
__next__ = next
def __bool__(self): def __bool__(self):
""" In boolean context we're never True """ """ In boolean context we're never True """
@@ -98,28 +169,61 @@ class Error(Exception):
def __str__(self): def __str__(self):
out = [] out = []
out.append("(%s)" % (self.error or "local")) for e in self.errors:
if self.method is not None: out.append(self.str_err(e))
out.append(" in %s" % self.method) out.append(self.str_info(e))
if self.req_id is not None: return "\n".join(out)
out.append("(%08x)" % self.req_id)
if self.message is not None:
out.append(": %s" % self.message) def log(self, logger=None, prio=logging.ERROR):
if self.excval is not None: if not logger:
out.append(" - cause was %s: %s" % (type(self.excval).__name__, str(self.excval))) logger = logging.getLogger()
for e in self.errors:
logger.log(prio, self.str_err(e))
info = self.str_info(e)
if info:
logger.info(info)
debug = self.str_debug(e)
if debug:
logger.debug(debug)
def str_preamble(self, e):
return "%08x/%s" % (e.get("req_id", 0), e.get("method", "?"))
def str_err(self, e):
out = []
out.append(self.str_preamble(e))
out.append(" Error(%s) %s " % (e.get("error", 0), e.get("message", "Unknown error")))
if "exc" in e and e["exc"]:
out.append("(cause was %s: %s)" % (e["exc"][0].__name__, str(e["exc"][1])))
return "".join(out) return "".join(out)
def info_str(self): def str_info(self, e):
return ("Detail: %s" % pformat(self.detail)) or "" ecopy = dict(e) # shallow copy
ecopy.pop("req_id", None)
ecopy.pop("method", None)
ecopy.pop("error", None)
ecopy.pop("message", None)
ecopy.pop("exc", None)
if ecopy:
out = "%s Detail: %s" % (self.str_preamble(e), json.dumps(ecopy, default=lambda v: str(v)))
else:
out = ""
return out
def debug_str(self): def str_debug(self, e):
out = [] out = []
if self.excval is not None: out.append(self.str_preamble(e))
out.append("Exception %s: %s\n" % (type(self.excval).__name__, str(self.excval))) if not "exc" in e or not e["exc"]:
if self.exctb is not None: return ""
out.append("Traceback:\n%s" % "".join(format_tb(self.exctb))) exc_tb = e["exc"][2] # exc_tb is string repr. of traceback object
if exc_tb:
out.append("Traceback:\n")
out.extend(exc_tb)
return "".join(out) return "".join(out)
@@ -132,12 +236,15 @@ class Client(object):
keyfile=None, keyfile=None,
cafile=None, cafile=None,
timeout=60, timeout=60,
recv_events_limit=6000, retry=20,
errlog={"level": "debug"}, pause=10,
get_events_limit=6000,
send_events_limit=500,
errlog={},
syslog=None, syslog=None,
filelog=None, filelog=None,
idstore=None, idstore=None,
name="warden_client", name="org.example.warden.test",
secret=None): secret=None):
self.name = name self.name = name
@@ -151,15 +258,51 @@ class Client(object):
self.conn = None self.conn = None
base = path.join(path.dirname(__file__)) base = path.join(path.dirname(__file__))
self.certfile = path.join(base, certfile or "cert.pem") self.certfile = self.get_readable_file(certfile or "cert.pem", base)
self.keyfile = path.join(base, keyfile or "key.pem") self.keyfile = self.get_readable_file(keyfile or "key.pem", base)
self.cafile = path.join(base, cafile or "ca.pem") self.cafile = self.get_readable_file(cafile if cafile is not None else DEFAULT_CA_STORES, base)
self.timeout = int(timeout) self.timeout = int(timeout)
self.recv_events_limit = int(recv_events_limit) self.get_events_limit = int(get_events_limit)
self.idstore = path.join(base, idstore) if idstore is not None else None self.idstore = path.join(base, idstore) if idstore is not None else None
self.ciphers = 'TLS_RSA_WITH_AES_256_CBC_SHA' self.send_events_limit = int(send_events_limit)
self.sslversion = ssl.PROTOCOL_TLSv1 self.retry = int(retry)
self.pause = int(pause)
self.ciphers = None
self.sslversion = getattr(ssl, "PROTOCOL_TLS", ssl.PROTOCOL_SSLv23)
# If Python is new enough to have SSLContext, use it for SSL settings,
# otherwise our own class derived from httplib.HTTPSConnection is used
# later in connect().
if hasattr(ssl, 'SSLContext'):
self.sslcontext = ssl.SSLContext(self.sslversion)
self.sslcontext.load_cert_chain(self.certfile, self.keyfile)
if self.cafile:
self.sslcontext.load_verify_locations(self.cafile)
self.sslcontext.verify_mode = ssl.CERT_REQUIRED
else:
self.sslcontext.verify_mode = ssl.CERT_NONE
else:
self.sslcontext = None
self.getInfo() # Call to align limits with server opinion
def get_readable_file(self, name, base):
names = [name] if isinstance(name, basestring) else name
names = [path.join(base, n) for n in names]
errors = []
for n in names:
try:
open(n, "r").close()
self.logger.debug("Using %s" % n)
return n
except IOError as e:
errors.append(e)
for e in errors:
self.logger.error(str(e))
return names[0] if names else None
def init_log(self, errlog, syslog, filelog): def init_log(self, errlog, syslog, filelog):
@@ -178,7 +321,7 @@ class Client(object):
self.logger.warning("Unknown syslog facility \"%s\", using \"local7\"" % fac) self.logger.warning("Unknown syslog facility \"%s\", using \"local7\"" % fac)
return logging.handlers.SysLogHandler.LOG_LOCAL7 return logging.handlers.SysLogHandler.LOG_LOCAL7
form = "%(filename)s[%(process)d]: (%(levelname)s) %(name)s %(message)s" form = "%(filename)s[%(process)d]: %(name)s (%(levelname)s) %(message)s"
format_notime = logging.Formatter(form) format_notime = logging.Formatter(form)
format_time = logging.Formatter('%(asctime)s ' + form) format_time = logging.Formatter('%(asctime)s ' + form)
@@ -189,7 +332,7 @@ class Client(object):
if errlog is not None: if errlog is not None:
el = logging.StreamHandler(stderr) el = logging.StreamHandler(stderr)
el.setFormatter(format_time) el.setFormatter(format_time)
el.setLevel(loglevel(errlog.get("level", "debug"))) el.setLevel(loglevel(errlog.get("level", "info")))
self.logger.addHandler(el) self.logger.addHandler(el)
if filelog is not None: if filelog is not None:
@@ -197,23 +340,24 @@ class Client(object):
fl = logging.FileHandler( fl = logging.FileHandler(
filename=path.join( filename=path.join(
path.dirname(__file__), path.dirname(__file__),
filelog.get("file", "%s.log" % self.name))) filelog.get("file", "%s.log" % self.name)),
fl.setLevel(loglevel(filelog.get("level", "warning"))) encoding="utf-8")
fl.setLevel(loglevel(filelog.get("level", "debug")))
fl.setFormatter(format_time) fl.setFormatter(format_time)
self.logger.addHandler(fl) self.logger.addHandler(fl)
except Exception as e: except Exception as e:
Error("Unable to setup file logging", self.logger, exc=exc_info()) Error(message="Unable to setup file logging", exc=exc_info()).log(self.logger)
if syslog is not None: if syslog is not None:
try: try:
sl = logging.handlers.SysLogHandler( sl = logging.handlers.SysLogHandler(
address=syslog.get("socket", "/dev/log"), address=fix_logging_filename(syslog.get("socket", "/dev/log")),
facility=facility(syslog.get("facility", "local7"))) facility=facility(syslog.get("facility", "local7")))
sl.setLevel(loglevel(syslog.get("level", "warning"))) sl.setLevel(loglevel(syslog.get("level", "debug")))
sl.setFormatter(format_notime) sl.setFormatter(format_notime)
self.logger.addHandler(sl) self.logger.addHandler(sl)
except Exception as e: except Exception as e:
Error("Unable to setup syslog logging", self.logger, exc=exc_info()) Error(message="Unable to setup syslog logging", exc=exc_info()).log(self.logger)
if not (errlog or filelog or syslog): if not (errlog or filelog or syslog):
# User wants explicitly no logging, so let him shoot his socks off. # User wants explicitly no logging, so let him shoot his socks off.
@@ -222,37 +366,46 @@ class Client(object):
self.logger.addHandler(logging.NullHandler()) self.logger.addHandler(logging.NullHandler())
def log_err(self, err, prio=logging.ERROR):
if isinstance(err, Error):
err.log(self.logger, prio)
return err
def connect(self):
try:
if self.url.scheme=="https":
if self.sslcontext:
conn = httplib.HTTPSConnection(
self.url.netloc,
timeout = self.timeout,
context = self.sslcontext)
else:
conn = HTTPSConnection(
self.url.netloc,
key_file = self.keyfile,
cert_file = self.certfile,
timeout = self.timeout,
ciphers = self.ciphers,
ca_certs = self.cafile,
ssl_version = self.sslversion)
elif self.url.scheme=="http":
conn = httplib.HTTPConnection(
self.url.netloc,
timeout = self.timeout)
else:
return Error(message="Don't know how to connect to \"%s\"" % self.url.scheme,
url=self.url.geturl())
except Exception:
return Error(message="HTTP(S) connection failed", exc=exc_info(),
url=self.url.geturl(),
timeout=self.timeout,
key_file=self.keyfile,
cert_file=self.certfile,
cafile=self.cafile,
ciphers=self.ciphers,
ssl_version=self.sslversion)
return conn
...@@ -265,27 +418,30 @@ class Client(object):
kwargs["secret"] = self.secret
if kwargs:
for k in list(kwargs.keys()):
if kwargs[k] is None:
del kwargs[k]
argurl = "?" + urlencode(kwargs, doseq=True)
else:
argurl = ""
self.headers = {"Accept": "application/json"}
data = None
if payload is None:
method = "GET"
else:
method = "POST"
try:
data = json.dumps(payload)
except:
return Error(message="Serialization to JSON failed",
exc=exc_info(), method=func, payload=payload)
self.headers.update({
"Content-Type": "application/json",
"Content-Length": str(len(data))
})
# HTTP(S)Connection is oneshot object (and we don't speak "pipelining")
conn = self.connect()
...@@ -294,57 +450,46 @@ class Client(object):
loc = '%s/%s%s' % (self.url.path, func, argurl)
try:
conn.request(method, loc, data, self.headers)
except:
conn.close()
return Error(message="Sending of request to server failed",
exc=exc_info(), method=func, log=loc, headers=self.headers, data=data)
try:
res = conn.getresponse()
except:
conn.close()
return Error(method=func, message="HTTP reply failed",
exc=exc_info(), loc=loc, headers=self.headers, data=data)
try:
response_data = res.read()
except:
conn.close()
return Error(method=func, message="Fetching HTTP data from server failed",
exc=exc_info(), loc=loc, headers=self.headers, data=data)
conn.close()
if res.status==httplib.OK:
try:
data = json.loads(response_data.decode("utf-8"))
except:
data = Error(method=func, message="JSON message parsing failed",
exc=exc_info(), response=response_data)
else:
try:
data = json.loads(response_data.decode("utf-8"))
data["errors"] # trigger exception if not dict or no error key
except:
data = Error(method=func, message="Generic server HTTP error",
error=res.status, exc=exc_info(), response=response_data)
else:
data = Error(
method=data.get("method", None),
req_id=data.get("req_id", None),
errors=data.get("errors", []))
return data
...@@ -358,8 +503,8 @@ class Client(object):
f.write(str(id))
except (ValueError, IOError) as e:
# Use Error instance just for proper logging
Error(message="Writing id file \"%s\" failed" % idf, exc=exc_info(),
idstore=idf).log(self.logger, logging.INFO)
return id
...@@ -371,24 +516,96 @@ class Client(object):
with open(idf, "r") as f:
id = int(f.read())
except (ValueError, IOError) as e:
Error(message="Reading id file \"%s\" failed, relying on server" % idf,
exc=exc_info(), idstore=idf).log(self.logger, logging.INFO)
id = None
return id
def getDebug(self):
return self.log_err(self.sendRequest("getDebug"))
def getInfo(self):
res = self.sendRequest("getInfo")
if isinstance(res, Error):
res.log(self.logger)
else:
try:
self.send_events_limit = min(res["send_events_limit"], self.send_events_limit)
self.get_events_limit = min(res["get_events_limit"], self.get_events_limit)
except (AttributeError, TypeError, KeyError):
pass
return res
def send_events_raw(self, events=[]):
return self.sendRequest("sendEvents", payload=events)
def send_events_chunked(self, events=[]):
""" Split potentially long "events" list to send_events_limit
long chunks to avoid slap from server.
"""
count = len(events)
err = Error()
send_events_limit = self.send_events_limit # object stored value can change during sending
for offset in range(0, count, send_events_limit):
res = self.send_events_raw(events[offset:min(offset+send_events_limit, count)])
if isinstance(res, Error):
# Shift all error indices by offset to correspond with 'events' list
for e in res.errors:
evlist = e.get("events", [])
# Update sending limit advice, if present in error
srv_limit = e.get("send_events_limit")
if srv_limit:
self.send_events_limit = min(self.send_events_limit, srv_limit)
for i in range(len(evlist)):
evlist[i] += offset
err.errors.extend(res.errors)
return err if err.errors else {}
def sendEvents(self, events=[], retry=None, pause=None):
""" Send out "events" list to server, retrying on server errors.
"""
ev = events
idx_xlat = list(range(len(ev)))
err = Error()
retry = retry or self.retry
attempt = retry
while ev and attempt:
if attempt<retry:
self.logger.info("%d transient errors, retrying (%d to go)" % (len(ev), attempt))
time.sleep(pause or self.pause)
res = self.send_events_chunked(ev)
attempt -= 1
next_ev = []
next_idx_xlat = []
if isinstance(res, Error):
# Sort to process fatal errors first
res.errors.sort(key=itemgetter("error"))
for e in res.errors:
errno = e["error"]
evlist = e.get("events", list(range(len(ev)))) # none means all
if errno < 500 or not attempt:
# Fatal error or last try, translate indices
# to original and prepare for returning to caller
for i in range(len(evlist)):
evlist[i] = idx_xlat[evlist[i]]
err.errors.append(e)
else:
# Maybe transient error, prepare to try again
for evlist_i in evlist:
next_ev.append(ev[evlist_i])
next_idx_xlat.append(idx_xlat[evlist_i])
ev = next_ev
idx_xlat = next_idx_xlat
return self.log_err(err) if err.errors else {"saved": len(events)}
def getEvents(self, id=None, idstore=None, count=None,
...@@ -396,25 +613,25 @@ class Client(object):
tag=None, notag=None,
group=None, nogroup=None):
if id is None:
id = self._loadID(idstore)
res = self.sendRequest(
"getEvents", id=id, count=self.get_events_limit if count is None else count, cat=cat,
nocat=nocat, tag=tag, notag=notag, group=group, nogroup=nogroup)
if res:
try:
events = res["events"]
newid = res["lastid"]
except KeyError:
events = Error(method="getEvents", message="Server returned bogus reply",
exc=exc_info(), response=res)
self._saveID(newid)
else:
events = res
return self.log_err(events)
def close(self):
...@@ -427,8 +644,27 @@ class Client(object):
def format_timestamp(epoch=None, utc=True, utcoffset=None):
if utcoffset is None:
utcoffset = -(time.altzone if time.daylight else time.timezone)
if epoch is None:
epoch = time.time()
if utc:
epoch += utcoffset
us = int(epoch % 1 * 1000000 + 0.5)
return format_time(*time.gmtime(epoch)[:6], microsec=us, utcoffset=utcoffset)
def format_time(year, month, day, hour, minute, second, microsec=0, utcoffset=None):
if utcoffset is None:
utcoffset = -(time.altzone if time.daylight else time.timezone)
tstr = "%04d-%02d-%02dT%02d:%02d:%02d" % (year, month, day, hour, minute, second)
usstr = "." + str(microsec).rstrip("0") if microsec else ""
offsstr = ("%+03d:%02d" % divmod((utcoffset+30)//60, 60)) if utcoffset else "Z"
return tstr + usstr + offsstr
def read_cfg(cfgfile):
with open(cfgfile, "r") as f:
stripcomments = "\n".join((l for l in f if not l.lstrip().startswith(("#", "//"))))
return json.loads(stripcomments)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2015 Cesnet z.s.p.o
# Use of this source is governed by a 3-clause BSD-style license, see LICENSE file.
import string
from time import time
from uuid import uuid4
from pprint import pprint
from random import randint, randrange, choice, random
from base64 import b64encode
from warden_client import Client, Error, read_cfg, format_timestamp
def gen_min_idea():
return {
"Format": "IDEA0",
"ID": str(uuid4()),
"DetectTime": format_timestamp(),
"Category": ["Test"],
}
...@@ -63,18 +53,18 @@ def gen_random_idea(client_name="cz.example.warden.test"):
def randip6():
return [rand6ip, geniprange(rand6ip), rand6cidr][randint(0, 2)]()
def randstr(charlist=string.ascii_letters, maxlen=32, minlen=1):
return ''.join(choice(charlist) for i in range(randint(minlen, maxlen)))
event = {
"Format": "IDEA0",
"ID": str(uuid4()),
"CreateTime": format_timestamp(),
"DetectTime": format_timestamp(),
"WinStartTime": format_timestamp(),
"WinEndTime": format_timestamp(),
"EventTime": format_timestamp(),
"CeaseTime": format_timestamp(),
#"Category": ["Abusive.Spam","Abusive.Harassment","Malware","Fraud.Copyright","Test","Fraud.Phishing","Fraud.Scam"],
# "Category": ["Abusive.Spam","Fraud.Copyright"],
"Category": [choice(["Abusive.Spam","Abusive.Harassment","Malware","Fraud.Copyright","Test","Fraud.Phishing","Fraud.Scam"]) for dummy in range(randint(1, 3))],
...@@ -113,22 +103,25 @@ def gen_random_idea(client_name="cz.example.warden.test"):
"Size": 46,
"Ref": ["cve:CVE-%s-%s" % (randstr(string.digits, 4), randstr())],
"ContentEncoding": "base64",
"Content": b64encode(randstr().encode('ascii')).decode("ascii")
}
],
"Node": [
{
"Name": client_name,
"Type": [choice(["Data", "Protocol", "Honeypot", "Heuristic", "Log"]) for dummy in range(randint(1, 3))],
"SW": ["Kippo"],
"AggrWin": "00:05:00"
},
{
"Name": "org.example.warden.client",
"Type": [choice(["Connection", "Datagram"]) for dummy in range(randint(1, 2))],
}
]
}
return event
def main():
wclient = Client(**read_cfg("warden_client.cfg"))
# Also inline arguments are possible:
...@@ -143,44 +136,53 @@ def main():
# idstore="MyClient.id",
# name="cz.example.warden.test")
print("=== Debug ===")
info = wclient.getDebug()
pprint(info)
# All methods return something.
# If you want to catch possible errors (for example to implement some
# form of persistent retry, or to save failed events for later), you may
# check for an Error instance and act based on the contained info.
# If you want just to be informed, this is not necessary, just
# configure logging correctly and check logs.
if isinstance(info, Error):
print(info)
print("=== Server info ===")
info = wclient.getInfo()
print("=== Sending 10 event(s) ===")
start = time()
ret = wclient.sendEvents([gen_random_idea(client_name=wclient.name) for i in range(10)])
print(ret)
print("Time: %f" % (time()-start))
print("=== Getting 10 events ===")
start = time()
# cat = ['Availability', 'Abusive.Spam','Attempt.Login']
# cat = ['Attempt', 'Information','Fraud.Scam','Malware.Virus']
# cat = ['Fraud', 'Abusive.Spam']
# nocat = ['Availability', 'Information', 'Fraud.Scam']
cat = []
nocat = []
#tag = ['Log', 'Data']
#notag = ['Flow', 'Datagram']
tag = []
notag = []
#group = ['cz.tul.ward.kippo','cz.vsb.buldog.kippo']
#nogroup = ['cz.zcu.civ.afrodita','cz.vutbr.net.bee.hpscan']
group = []
nogroup = []
ret = wclient.getEvents(count=10, cat=cat, nocat=nocat, tag=tag, notag=notag, group=group, nogroup=nogroup)
print("Time: %f" % (time()-start))
print("Got %i events" % len(ret))
for e in ret:
print(e.get("Category"), e.get("Node")[0].get("Type"), e.get("Node")[0].get("Name"))
if isinstance(ret, Error):
print(ret)
if __name__ == "__main__":
main()
#!/bin/sh
#
# Copyright (C) 2011-2015 Cesnet z.s.p.o
# Use of this source is governed by a 3-clause BSD-style license, see LICENSE file.
if [ "$#" -ne 6 ]; then
echo "Run me like:"
echo "${0##*/} 'https://warden-hub.example.org/warden3' org.example.warden.client 'ToPsEcReT' key.pem cert.pem tcs-ca-bundle.pem"
exit 1
fi
url="$1"
client="$2"
secret="$3"
keyfile="$4"
certfile="$5"
cafile="$6"
echo "Test 404"
curl \
...@@ -20,7 +23,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/blefub?client=$client&secret=$secret"
echo
...@@ -30,7 +32,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/?client=$client&secret=$secret"
echo
...@@ -40,7 +41,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?client=$client"
echo
...@@ -50,7 +50,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents"
echo
...@@ -60,7 +59,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?client=asdf.blefub"
echo
...@@ -70,7 +68,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?client=asdf.blefub&secret=$secret"
echo
...@@ -80,7 +77,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?client=$client&secret=ASDFblefub"
echo
...@@ -90,7 +86,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?secret=$secret"
echo
...@@ -111,7 +106,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?client=$client&secret=$secret&cat=bflm"
echo
...@@ -121,7 +115,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?client=$client&secret=$secret&cat=Other&nocat=Test"
echo
...@@ -142,7 +135,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?client=$client&secret=$secret&self=test"
echo
...@@ -152,7 +144,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?client=$client&secret=$secret&bad=guy"
echo
...@@ -162,7 +153,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?client=$client&secret=$secret"
echo
...@@ -172,7 +162,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getEvents?client=$client&secret=$secret&count=3&id=10"
echo
...@@ -182,7 +171,6 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getDebug?client=$client&secret=$secret"
echo
...@@ -192,12 +180,5 @@ curl \
--cert $certfile \
--cacert $cafile \
--connect-timeout 3 \
"$url/getInfo?client=$client&secret=$secret"
echo
BSD License
Copyright © 2011-2015 Cesnet z.s.p.o
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Cesnet z.s.p.o nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE Cesnet z.s.p.o BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+---------------------------------------+
| Warden Filer 3.0-beta3 for Warden 3.X |
+---------------------------------------+
Content
A. Introduction
B. Dependencies
C. Usage
D. Configuration
E. Directories and locking issues
------------------------------------------------------------------------------
A. Introduction
Warden Filer (executable warden_filer.py) is a daemon for easy handling of
Idea event transfer between plain local files and a Warden server. The tool
can be instructed to run as one of two daemons - receiver or sender.
In receiver mode, Filer polls the Warden server and saves incoming events
as plain files in a directory.
In sender mode, Filer polls a directory and sends all new files out to the
Warden server.
------------------------------------------------------------------------------
B. Dependencies
1. Platform
Python 2.7+
2. Python packages
python-daemon 1.5+, warden_client 3.0+
------------------------------------------------------------------------------
C. Usage
warden_filer.py [-h] [-c CONFIG] [--oneshot] [-d] [-p PID_FILE] {sender,receiver}
Save Warden events as files or send files to Warden
positional arguments:
{sender,receiver} choose direction: sender picks up files and submits
them to Warden, receiver pulls events from Warden
and saves them as files
optional arguments:
-h, --help show this help message and exit
-c CONFIG, --config CONFIG
configuration file path
--oneshot don't daemonise, run just once
-d, --daemon daemonize
-p PID_FILE, --pid_file PID_FILE
create PID file with this name
CONFIG denotes the path to the configuration file; the default is
warden_filer.cfg in the current directory.
--oneshot instructs Filer to do its work just once (fetch the available
events or send the event files present in the directory), while obeying all
other applicable options from the configuration file (concerning logging,
filtering, directories, etc.).
--daemon instructs Filer to go to full unix daemon mode. Without it,
Filer just stays in the foreground.
--pid_file makes Filer create the usual PID file. Without it, no PID
file gets created.
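For example, a one-off sender run with an explicit configuration file could
be started like this (the paths are illustrative):
warden_filer.py -c /etc/warden_filer.cfg --oneshot sender
and a receiver running as a daemon with a PID file:
warden_filer.py -c /etc/warden_filer.cfg --daemon --pid_file /var/run/warden_filer/receiver.pid receiver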
------------------------------------------------------------------------------
D. Configuration
Configuration is a JSON object in a file - however, lines starting with "#"
or "//" are allowed and will be ignored as comments. The file must contain a
valid JSON object with the configuration. See the sketch at the end of this
section and warden_filer.cfg as an example.
warden - can contain Warden 3 configuration (see Warden doc), or path
to Warden configuration file
sender - configuration section for sender mode
dir - directory, whose "incoming" subdir will be checked for Idea
events to send out
done_dir - directory, into which the messages will be moved after
successful sending. If not set, processed messages get deleted,
which is the default, and usually what you want. Note that this is
just a regular directory, no special locking precautions and no
subdirectories are managed here; however, the move is cheap and
atomic only if "done_dir" resides on the same filesystem as "dir"
filter - filter fields (same as in Warden query, see Warden and Idea
doc, possible keys: cat, nocat, group, nogroup, tag, notag),
unmatched events get discarded and deleted
node - information about the detector to be prepended into the event Node
array (see Idea doc). Note that the Warden server may require it to
correspond with the client registration
poll_time - how often to check incoming directory (in seconds, defaults
to 5)
owait_timeout - how long to opportunistically wait for possible new
incoming files when number of files to process is less than
send_events_limit (in seconds, defaults to poll_time)
owait_poll_time - how often to check incoming directory during
opportunistic timeout (in seconds, defaults to 1)
receiver - configuration section for receiver mode
dir - directory, whose "incoming" subdir will serve as target for events
filter - filter fields for Warden query (see Warden and Idea doc,
possible keys: cat, nocat, group, nogroup, tag, notag)
node - information about the detector to be prepended into the event Node
array (see Idea doc). Be careful here, you may ruin Idea messages
by wrongly formatted data, as they are not checked here in any
way
poll_time - how often to check Warden server for new events (in seconds,
defaults to 5)
file_limit - limit number of files in "incoming" directory. When the limit
is reached, polling is paused for "limit_wait_time" seconds
limit_wait_time - wait this number of seconds if limit on number of files
is reached (defaults to 5)
Both the "sender" and "reciever" sections can also bear daemon
configuration.
work_dir - where should daemon chdir
chroot_dir - confine daemon into chroot directory
umask - explicitly set umask for created files
uid, gid - uid/gid, under which daemon will run
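A minimal sender configuration might then look like the following sketch
(the paths, uid and gid are illustrative; see warden_filer.cfg in this
repository for a fuller, commented example):
{
    "warden": "warden_client.cfg",
    "sender": {
        "dir": "/var/spool/warden_filer/sender",
        "done_dir": "/var/spool/warden_filer/done",
        "poll_time": 5,
        "work_dir": "/var/spool/warden_filer",
        "uid": "warden",
        "gid": "warden"
    }
}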
------------------------------------------------------------------------------
E. Directories and locking issues
Working directories are not just simple paths, but contain structure,
loosely mimicking Maildir, with slightly changed names to avoid confusion at
first look. A simple path suffers from a locking issue: when one process
saves a file there, another process has no way to know whether the file is
already complete or not, and starting to read prematurely can lead to
corrupted data being read. Also, two concurrent processes may decide to work
on the same file, stepping on each other's toes.
So, your scripts and tools inserting data into or taking data from the
working directories must obey a simple protocol, which uses atomic "rename"
to avoid locking issues.
Also, your directory (and its structure) _must_ reside on the same
filesystem to keep "rename" atomic. _Never_ try to mount some of the
subdirectories ("tmp", "incoming", "errors") from another filesystem.
1. Inserting file
* The file you want to create _must_ be created in the "tmp" subdirectory
first, _not_ "incoming". Filename is arbitrary, but must be unique among
all subdirectories.
* When done writing, rename the file into "incoming" subdir. Rename is
atomic operation, so for readers, file will appear either nonexistent
or complete.
For simple usage (bash scripts, etc.), just creating a sufficiently random
filename in "tmp" and then moving it into "incoming" may be enough.
Concatenating $RANDOM a couple of times will do. :)
For advanced or potentially concurrent usage, putting enough unique
information into the name is recommended - Filer itself uses hostname, pid,
unixtime, milliseconds, device number and file inode number to avoid
locking issues both on local and network based filesystems and to be
prepared for high traffic. A sketch of this protocol follows below.
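The following is a minimal sketch of the insert protocol in Python; the
filename scheme is deliberately simpler than the one Filer itself uses
(a random UUID instead of device/inode numbers), which should still be
unique enough for casual use:

import json
import os
import socket
import time
import uuid

def drop_event(workdir, event):
    # Create the file in "tmp" first, under a name unique to this host/process
    name = "%s.%d.%f.%s.idea" % (
        socket.gethostname(), os.getpid(), time.time(), uuid.uuid4().hex)
    tmppath = os.path.join(workdir, "tmp", name)
    with open(tmppath, "w") as f:
        f.write(json.dumps(event))
    # Atomic rename - the file appears in "incoming" only when complete
    os.rename(tmppath, os.path.join(workdir, "incoming", name))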
2. Picking up file
* Rename the file to work with into "tmp" directory.
* Do whatever you want with contents, and when finished, rename file back
into "incoming", or remove, or move somewhere else, or move into "errors"
directory - what suits your needs, after all, it's your file.
Note that in a concurrent environment a file can disappear between the
directory enumeration and the attempt to rename it - then just pick another
one (and possibly repeat), someone was swifter. See the sketch below.
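A corresponding minimal sketch of the pick-up side, under the same
assumptions as above (the directory names come from the layout described in
this section):

import os

def pick_one(workdir):
    # Try to claim a file by renaming it into "tmp"; return its new path on success
    for name in os.listdir(os.path.join(workdir, "incoming")):
        src = os.path.join(workdir, "incoming", name)
        dst = os.path.join(workdir, "tmp", name)
        try:
            os.rename(src, dst)
        except OSError:
            continue    # somebody else was swifter, try the next file
        return dst
    return None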
------------------------------------------------------------------------------
Copyright (C) 2011-2015 Cesnet z.s.p.o
#!/bin/bash
script=${0##*/}
warn=0
crit=65536
read -rd '' helps <<EOF
$script: Icinga plugin to check for too many files in a directory.
Usage: $script -d dir [-w num] [-c num] [-h ]
-d dir directory to watch
-w num warning if number of files exceeds this value (default $warn)
-c num critical if number of files exceeds this value (default $crit)
EOF
function bailout {
echo -n "$script" | tr '[:lower:]' '[:upper:]'
echo " $2 $3"
exit "$1"
}
while getopts hvVd:w:c: opt; do
case "$opt" in
h) bailout 3 "UNKNOWN" "$helps";;
d) dir="$OPTARG";;
w) warn="$OPTARG";;
c) crit="$OPTARG";;
"?") bailout 3 "UNKNOWN" "Unknown option, use -h for help";;
esac
done
[ -z "$dir" ] && bailout 3 "UNKNOWN" "-d not specified"
count=$(find "$dir" -mindepth 1 -maxdepth 1 | wc -l)
[ "$count" -gt "$crit" ] && bailout 2 "CRIT" "$count"
[ "$count" -gt "$warn" ] && bailout 1 "WARN" "$count"
bailout 0 "OK" "$count"
// For all options see documentation
{
// Warden config can be also referenced as:
// "warden": "/path/to/warden_client.cfg"
"warden": {
"url": "https://example.com/warden3",
"cafile": "tcs-ca-bundle.pem",
"keyfile": "my.key.pem",
"certfile": "my.cert.pem",
"timeout": 60,
"retry": 20,
"pause": 5,
"filelog": {"level": "debug"},
"name": "com.example.warden.test",
"secret": "SeCrEt"
},
"sender": {
// Maildir like directory, whose "incoming" subdir will be checked
// for Idea events to send out
"dir": "warden_sender",
// Optional filter fields, unmatched events are discarded (and removed)
//"filter": {
// "cat": ["Test", "Recon.Scanning"],
// "nocat": null,
// "group": ["cz.example"],
// "nogroup": null,
// "tag": null,
// "notag": ["Honeypot"]
//},
// Optional information about detector to be prepended into Idea Node array
//"node": {
// "Name": "cz.example.warden.test_sender",
// "Type": ["Relay"]
//}
},
"receiver": {
// Maildir like directory, whose "incoming" will serve as target for events
"dir": "warden_receiver",
// Optional filter fields for Warden query
//"filter": {
// "cat": ["Test", "Recon.Scanning"],
// "nocat": null,
// "group": ["cz.cesnet"],
// "nogroup": null,
// "tag": null,
// "notag": ["Honeypot"]
//},
// Optional information about detector to be prepended into Idea Node array
//"node": {
// "Name": "cz.example.warden.test_receiver",
// "Type": ["Relay"]
//},
// Optional limit on number of files in "incoming" directory
//"file_limit": 10000
}
}
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2015 Cesnet z.s.p.o
# Use of this source is governed by a 3-clause BSD-style license, see LICENSE file.
from warden_client import Client, Error, read_cfg
import json
import string
import os
import sys
import errno
import socket
import time
import logging
import signal
import resource
import atexit
import argparse
from os import path, mkdir
from random import choice, randint
# for py2/py3 compatibility
try:
basestring
except NameError:
basestring = str
VERSION = "3.0-beta3"
class NamedFile(object):
""" Wrapper class for file objects, which allows and tracks filename
changes.
"""
def __init__(self, pth, name, fd=None):
self.name = name
self.path = pth
if fd:
self.f = os.fdopen(fd, "w+b")
else:
self.f = None
def __str__(self):
return "%s(%s, %s)" % (type(self).__name__, self.path, self.name)
def get_path(self, basepath=None, name=None):
return path.join(basepath or self.path, name or self.name)
def open(self, mode):
return open(self.get_path(), mode)
def moveto(self, destpath):
os.rename(self.get_path(), self.get_path(basepath=destpath))
self.path = destpath
def rename(self, newname):
os.rename(self.get_path(), self.get_path(name=newname))
self.name = newname
def remove(self):
os.remove(self.get_path())
class SafeDir(object):
""" Maildir like directory for safe file exchange.
- Producers are expected to drop files into "tmp" under globally unique
filename and rename it into "incoming" atomically (newfile method)
- Workers pick files in "incoming", rename them into "tmp",
do whatever they want, and either discard them or move into
"errors" directory
"""
def __init__(self, p):
self.path = self._ensure_path(p)
self.incoming = self._ensure_path(path.join(self.path, "incoming"))
self.errors = self._ensure_path(path.join(self.path, "errors"))
self.temp = self._ensure_path(path.join(self.path, "tmp"))
self.hostname = socket.gethostname()
self.pid = os.getpid()
def __str__(self):
return "%s(%s)" % (type(self).__name__, self.path)
def _ensure_path(self, p):
try:
mkdir(p)
except OSError:
if not path.isdir(p):
raise
return p
def _get_new_name(self, device=0, inode=0):
return "%s.%d.%f.%d.%d.idea" % (
self.hostname, self.pid, time.time(), device, inode)
def newfile(self):
""" Creates file with unique filename within this SafeDir.
- hostname takes care of network filesystems
- pid distinguishes two daemons on one machine
(we are not multithreaded, so this is enough)
- time in best precision supported narrows window within process
- device/inode makes file unique on particular filesystem
In fact, device/inode is itself enough for uniqueness, however
if we mandate wider format, users can use simpler form with
random numbers instead of device/inode, if they choose to,
and it will still ensure reasonable uniqueness.
"""
# Note: this simpler device/inode algorithm replaces original,
# which checked uniqueness among all directories by atomic
# links.
# First find and open name unique within tmp
tmpname = None
while not tmpname:
tmpname = self._get_new_name()
try:
fd = os.open(path.join(self.temp, tmpname), os.O_CREAT | os.O_RDWR | os.O_EXCL)
except OSError as e:
if e.errno != errno.EEXIST:
raise # other errors than duplicates should get noticed
tmpname = None
# Now we know device/inode, rename to make unique within system
stat = os.fstat(fd)
newname = self._get_new_name(stat.st_dev, stat.st_ino)
nf = NamedFile(self.temp, tmpname, fd)
nf.rename(newname)
return nf
def get_incoming(self):
return [NamedFile(self.incoming, n) for n in os.listdir(self.incoming)]
def get_incoming_cnt(self):
"""Get number of files in the incoming directory"""
return len(os.listdir(self.incoming))
def receiver(config, wclient, sdir, oneshot):
poll_time = config.get("poll_time", 5)
node = config.get("node", None)
conf_filt = config.get("filter", {})
file_limit = config.get("file_limit", None)
wait_time = config.get("limit_wait_time", 5)
filt = {}
# Extract filter explicitly to be sure we have right param names for getEvents
for s in ("cat", "nocat", "tag", "notag", "group", "nogroup"):
filt[s] = conf_filt.get(s, None)
while running_flag:
count_ok = count_err = 0
limit_reached = False
if file_limit:
cnt_files = sdir.get_incoming_cnt() # Count files in 'incoming' dir
remain_to_limit = file_limit - cnt_files
# Query server, but not for more events than what can fit into limit
if remain_to_limit > 0:
events = wclient.getEvents(count=remain_to_limit, **filt)
else:
events = []
# Check whether limit was reached
if len(events) >= remain_to_limit:
limit_reached = True
else:
events = wclient.getEvents(**filt)
for event in events:
if node:
nodelist = event.setdefault("Node", [])
nodelist.insert(0, node)
try:
nf = None
nf = sdir.newfile()
with nf.f as f:
data = json.dumps(event)
f.write(data.encode('utf-8'))
nf.moveto(sdir.incoming)
count_ok += 1
except Exception as e:
Error(message="Error saving event", exc=sys.exc_info(), file=str(nf),
event_ids=[event.get("ID")], sdir=sdir.path).log(wclient.logger)
count_err += 1
if events:
wclient.logger.info(
"warden_filer: received %d, errors %d"
% (count_ok, count_err))
if limit_reached:
wclient.logger.info("Limit on number of files in 'incoming' dir reached.")
if oneshot:
if not events or limit_reached:
terminate_me(None, None)
else:
if limit_reached:
time.sleep(wait_time)
elif not events:
time.sleep(poll_time)
def match_event(event, cat=None, nocat=None, tag=None, notag=None, group=None, nogroup=None):
cat_match = tag_match = group_match = True
if cat or nocat:
event_cats = event.get("Category")
event_full_cats = set(event_cats) | set(cat.split(".", 1)[0] for cat in event_cats)
cat_match = set(cat or nocat) & event_full_cats
cat_match = not cat_match if nocat else cat_match
try:
event_node = event.get("Node", [])[0]
except IndexError:
event_node = {}
if tag or notag:
event_tags = set(event_node.get("Type", []))
tag_match = set(tag or notag) & event_tags
tag_match = not tag_match if notag else tag_match
if group or nogroup:
event_name = event_node.get("Name")
namesplit = event_name.split(".")
allnames = set([".".join(namesplit[0:l]) for l in range(1, len(namesplit)+1)])
group_match = set(group or nogroup) & allnames
group_match = not group_match if nogroup else group_match
return cat_match and tag_match and group_match
def get_dir_list(sdir, owait_poll_time, owait_timeout, nfchunk, oneshot):
nflist = sdir.get_incoming()
if oneshot and not nflist:
terminate_me(None, None)
timeout = time.time() + owait_timeout
while len(nflist)<nfchunk and time.time()<timeout and running_flag:
time.sleep(owait_poll_time)
nflist = sdir.get_incoming()
return nflist
def sender(config, wclient, sdir, oneshot):
poll_time = config.get("poll_time", 5)
owait_poll_time = config.get("owait_poll_time", 1)
owait_timeout = config.get("owait_timeout", poll_time)
node = config.get("node", None)
done_dir = config.get("done_dir", None)
conf_filt = config.get("filter", {})
filt = {}
# Extract filter explicitly to be sure we have right param names for match_event
for s in ("cat", "nocat", "tag", "notag", "group", "nogroup"):
filt[s] = conf_filt.get(s, None)
nfchunk = wclient.send_events_limit
while running_flag:
nflist = get_dir_list(sdir, owait_poll_time, owait_timeout, nfchunk, oneshot)
if oneshot and not nflist:
terminate_me(None, None)
while running_flag and not nflist:
# No new files, wait and try again
time.sleep(poll_time)
nflist = get_dir_list(sdir, owait_poll_time, owait_timeout, nfchunk, oneshot)
# Loop over all chunks. However:
# - omit the last loop, if there is less data than the optimal window;
# next get_dir_list will still get it again, possibly together with
# new files, which may have appeared meanwhile
# - unless it's the sole loop (so that at least _something_ gets sent)
nfindex = 0
while nfindex<len(nflist) and ((len(nflist)-nfindex>=nfchunk) or not nfindex):
events = []
nf_sent = []
count_ok = count_err = count_unmatched = count_local = 0
for nf in nflist[nfindex:nfindex+nfchunk]:
# prepare event array from files
try:
nf.moveto(sdir.temp)
except Exception:
continue # Silently go to next filename, somebody else might have interfered
try:
with nf.open("rb") as fd:
data = fd.read().decode('utf-8')
event = json.loads(data)
if not match_event(event, **filt):
wclient.logger.debug("Unmatched event: %s" % data)
count_unmatched += 1
nf.remove()
continue
if node:
nodelist = event.setdefault("Node", [])
nodelist.insert(0, node)
events.append(event)
nf_sent.append(nf)
except Exception as e:
Error(message="Error loading event", exc=sys.exc_info(), file=str(nf),
sdir=sdir.path).log(wclient.logger)
nf.moveto(sdir.errors)
count_local += 1
res = wclient.sendEvents(events)
if isinstance(res, Error):
for e in res.errors:
errno = e["error"]
evlist = e.get("events", range(len(nf_sent))) # None means all
for i in evlist:
if nf_sent[i]:
nf_sent[i].moveto(sdir.errors)
nf_sent[i] = None
count_err += 1
# Cleanup rest - the successfully sent events
for name in nf_sent:
if name:
if done_dir:
name.moveto(done_dir)
else:
name.remove()
count_ok += 1
wclient.logger.info(
"warden_filer: saved %d, warden errors %d, local errors %d, unmatched %d" % (count_ok, count_err, count_local, count_unmatched))
nfindex += nfchunk # skip to next chunk of files
nfchunk = wclient.send_events_limit # might get changed by server
def get_logger_files(logger):
""" Return file objects of loggers """
files = []
for handler in logger.handlers:
if hasattr(handler, 'stream') and hasattr(handler.stream, 'fileno'):
files.append(handler.stream)
if hasattr(handler, 'socket') and hasattr(handler.socket, 'fileno'):
files.append(handler.socket)
return files
def daemonize(
work_dir = None, chroot_dir = None,
umask = None, uid = None, gid = None,
pidfile = None, files_preserve = [], signals = {}):
# Dirs, limits, users
if chroot_dir is not None:
os.chdir(chroot_dir)
os.chroot(chroot_dir)
if umask is not None:
os.umask(umask)
if work_dir is not None:
os.chdir(work_dir)
if gid is not None:
os.setgid(gid)
if uid is not None:
os.setuid(uid)
# Doublefork, split session
if os.fork()>0:
os._exit(0)
os.setsid()
if os.fork()>0:
os._exit(0)
# Setup signal handlers
for (signum, handler) in signals.items():
signal.signal(signum, handler)
# Close descriptors
descr_preserve = set(f.fileno() for f in files_preserve)
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd==resource.RLIM_INFINITY:
maxfd = 65535
for fd in range(maxfd, 3, -1): # 3 means omit stdin, stdout, stderr
if fd not in descr_preserve:
try:
os.close(fd)
except Exception:
pass
# Redirect stdin, stdout, stderr to /dev/null
devnull = os.open(os.devnull, os.O_RDWR)
for fd in range(3):
os.dup2(devnull, fd)
# PID file
if pidfile is not None:
pidd = os.open(pidfile, os.O_RDWR|os.O_CREAT|os.O_EXCL|os.O_TRUNC)
os.write(pidd, (str(os.getpid())+"\n").encode())
os.close(pidd)
# Define and setup atexit closure
@atexit.register
def unlink_pid():
try:
os.unlink(pidfile)
except Exception:
pass
running_flag = True # Daemon cleanly exits when set to False
def terminate_me(signum, frame):
global running_flag
running_flag = False
class DummyContext(object):
""" In one shot mode we use this instead of DaemonContext """
def __enter__(self): pass
def __exit__(self, *exc): pass
def get_args():
argp = argparse.ArgumentParser(
description="Save Warden events as files or send files to Warden")
argp.add_argument("func",
choices=["sender", "receiver"],
action="store",
help="choose direction: sender picks up files and submits them to "
"Warden, receiver pulls events from Warden and saves them as files")
argp.add_argument("-c", "--config",
default=path.splitext(__file__)[0]+".cfg",
dest="config",
help="configuration file path")
argp.add_argument("-o", "--oneshot",
default=False,
dest="oneshot",
action="store_true",
help="don't daemonise, run just once")
argp.add_argument("-d", "--daemon",
default=False,
dest="daemon",
action="store_true",
help="daemonize")
argp.add_argument("-p", "--pid_file",
default=None,
dest="pid_file",
action="store",
help="create PID file with this name")
return argp.parse_args()
def get_configs():
config = read_cfg(args.config)
# Allow inline or external Warden config
wconfig = config.get("warden", "warden_client.cfg")
if isinstance(wconfig, basestring):
wconfig = read_cfg(wconfig)
fconfig = config.get(args.func, {})
return wconfig, fconfig
def get_uid_gid(str_id, get_nam_func):
if str_id:
try:
id = int(str_id)
except ValueError:
id = get_nam_func(str_id)[2]
else:
id = None
return id
if __name__ == "__main__":
args = get_args()
function = sender if args.func=="sender" else receiver
wconfig, fconfig = get_configs()
wclient = Client(**wconfig)
try:
if args.daemon:
from pwd import getpwnam
from grp import getgrnam
uid = get_uid_gid(fconfig.get("uid"), getpwnam)
gid = get_uid_gid(fconfig.get("gid"), getgrnam)
daemonize(
work_dir = fconfig.get("work_dir", "."),
chroot_dir = fconfig.get("chroot_dir"),
umask = fconfig.get("umask"),
uid = uid,
gid = gid,
pidfile = args.pid_file,
files_preserve = get_logger_files(wclient.logger),
signals = {
signal.SIGTERM: terminate_me,
signal.SIGINT: terminate_me,
signal.SIGHUP: signal.SIG_IGN,
signal.SIGTTIN: signal.SIG_IGN,
signal.SIGTTOU: signal.SIG_IGN})
safe_dir = SafeDir(fconfig.get("dir", args.func))
wclient.logger.info("Starting %s" % args.func)
function(fconfig, wclient, safe_dir, args.oneshot)
wclient.logger.info("Exiting %s" % args.func)
except Exception as e:
Error(message="%s daemon error" % args.func, exc=sys.exc_info()).log(wclient.logger)
# You may want to review and/or change the logfile path, user/group in
# 'create' and daemon to restart in 'postrotate'
/var/log/warden_filer.log
{
rotate 52
weekly
missingok
notifempty
compress
delaycompress
dateext
create 640 mentat mentat
postrotate
/etc/init.d/warden_filer_sender restart
/etc/init.d/warden_filer_receiver restart
endscript
}
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: warden_filer_receiver
# Required-Start: $local_fs $syslog
# Required-Stop: $local_fs $syslog
# Should-Start: $network $named
# Should-Stop: $network $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Warden Filer - receiver
### END INIT INFO
DAEMON_NAME=warden_filer
FUNC=receiver
DAEMON_PATH=/usr/local/bin/"$DAEMON_NAME".py
SERVICE_NAME="${DAEMON_NAME}_${FUNC}"
PID=/var/run/"$DAEMON_NAME"/"$FUNC".pid
CONFIG=/etc/"$DAEMON_NAME".cfg
# Try Debian & Fedora/RHEL/Suse sysconfig
for n in default sysconfig; do
[ -f /etc/$n/"$SERVICE_NAME" ] && . /etc/$n/"$SERVICE_NAME"
done
# Fallback
function log_daemon_msg () { echo -n "$@"; }
function log_end_msg () { [ $1 -eq 0 ] && echo " OK" || echo " Failed"; }
function status_of_proc () { [ -f "$PID" ] && ps u -p $(<"$PID") || echo "$PID not found."; }
function start_daemon () { shift; shift; $* ; }
function killproc () { kill $(cat $PID) ; }
[ -f /lib/lsb/init-functions ] && . /lib/lsb/init-functions
ACTION="$1"
case "$ACTION" in
start)
mkdir -p "${PID%/*}"
log_daemon_msg "Starting $SERVICE_NAME" "$SERVICE_NAME"
start_daemon -p "$PID" "$DAEMON_PATH" -c "$CONFIG" --pid_file "$PID" --daemon "$FUNC"
log_end_msg $?
;;
stop)
log_daemon_msg "Stopping $SERVICE_NAME" "$SERVICE_NAME"
killproc -p "$PID" "$DAEMON_PATH"
log_end_msg $?
;;
restart|force-reload)
$0 stop && sleep 2 && exec $0 start
;;
status)
status_of_proc -p "$PID" "$DAEMON_PATH" "$SERVICE_NAME"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 2
;;
esac