perfsonar-dev - [pS-dev] [GEANT/SA2/SA2T3-OPPD] r692 - in trunk/build: . HADES HADES/_build HADES/bin HADES/etc HADES/lib
Subject: perfsonar development work
List archive
[pS-dev] [GEANT/SA2/SA2T3-OPPD] r692 - in trunk/build: . HADES HADES/_build HADES/bin HADES/etc HADES/lib
Chronological Thread
- From:
- To:
- Subject: [pS-dev] [GEANT/SA2/SA2T3-OPPD] r692 - in trunk/build: . HADES HADES/_build HADES/bin HADES/etc HADES/lib
- Date: Fri, 18 Mar 2011 13:05:36 GMT
Author: dfn.calim
Date: 2011-03-18 13:05:36 +0000 (Fri, 18 Mar 2011)
New Revision: 692
Added:
trunk/build/HADES/
trunk/build/HADES/Build
trunk/build/HADES/MANIFEST
trunk/build/HADES/MANIFEST.SKIP
trunk/build/HADES/MANIFEST.bak
trunk/build/HADES/META.yml
trunk/build/HADES/MYMETA.yml
trunk/build/HADES/Makefile.PL
trunk/build/HADES/_build/
trunk/build/HADES/_build/auto_features
trunk/build/HADES/_build/build_params
trunk/build/HADES/_build/cleanup
trunk/build/HADES/_build/config_data
trunk/build/HADES/_build/features
trunk/build/HADES/_build/magicnum
trunk/build/HADES/_build/notes
trunk/build/HADES/_build/prereqs
trunk/build/HADES/_build/runtime_params
trunk/build/HADES/bin/
trunk/build/HADES/bin/hades-analyzed.pl
trunk/build/HADES/bin/hades-analyzer.pl
trunk/build/HADES/bin/hades-cfg-create.pl
trunk/build/HADES/bin/hades-cfg-gui.pl
trunk/build/HADES/bin/hades-cfg-test.pl
trunk/build/HADES/bin/hades-check-data-ippm-csv.pl
trunk/build/HADES/bin/hades-check-data-ippm.pl
trunk/build/HADES/bin/hades-check-data-tracert.pl
trunk/build/HADES/bin/hades-check-data.pl
trunk/build/HADES/bin/hades-check-wrapper.pl
trunk/build/HADES/bin/hades-cmd.pl
trunk/build/HADES/bin/hades-cp.pl
trunk/build/HADES/bin/hades-meta2db.pl
trunk/build/HADES/bin/hades-mkmap.pl
trunk/build/HADES/bin/hades-nagios-cfg-hosts.pl
trunk/build/HADES/bin/hades-nagios-cfg-routes.pl
trunk/build/HADES/bin/hades-nagios-check.pl
trunk/build/HADES/bin/hades-plot.pl
trunk/build/HADES/bin/hades-show-data.pl
trunk/build/HADES/bin/hades-show-extremes.pl
trunk/build/HADES/bin/hades-show-hosts.pl
trunk/build/HADES/bin/hades-ssh.pl
trunk/build/HADES/bin/hades-traceroute.pl
trunk/build/HADES/bin/hadescfg.pl
trunk/build/HADES/etc/
trunk/build/HADES/etc/hades-analyzed.conf.example
trunk/build/HADES/etc/hades-analyzed.init
trunk/build/HADES/etc/hades-analyzed.sysconfig
trunk/build/HADES/etc/hades-example.conf
trunk/build/HADES/lib/
trunk/build/HADES/lib/Hades.pm
Log:
add build HADES MA files
Added: trunk/build/HADES/Build
===================================================================
--- trunk/build/HADES/Build (rev 0)
+++ trunk/build/HADES/Build 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,57 @@
+#! /usr/bin/perl
+
+use strict;
+use Cwd;
+use File::Basename;
+use File::Spec;
+
+sub magic_number_matches {
+ return 0 unless -e '_build/magicnum';
+ local *FH;
+ open FH, '_build/magicnum' or return 0;
+ my $filenum = <FH>;
+ close FH;
+ return $filenum == 985903;
+}
+
+my $progname;
+my $orig_dir;
+BEGIN {
+ $^W = 1; # Use warnings
+ $progname = basename($0);
+ $orig_dir = Cwd::cwd();
+ my $base_dir = '/home/unrz217/SA2T3-OPPD/trunk/build/HADES';
+ if (!magic_number_matches()) {
+ unless (chdir($base_dir)) {
+ die ("Couldn't chdir($base_dir), aborting\n");
+ }
+ unless (magic_number_matches()) {
+      die ("Configuration seems to be out of date, please re-run 'perl Build.PL' again.\n");
+ }
+ }
+ unshift @INC,
+ (
+
+ );
+}
+
+close(*DATA) unless eof(*DATA); # ensure no open handles to this script
+
+use Module::Build;
+
+# Some platforms have problems setting $^X in shebang contexts, fix it up here
+$^X = Module::Build->find_perl_interpreter;
+
+if (-e 'Build.PL' and not Module::Build->up_to_date('Build.PL', $progname)) {
+  warn "Warning: Build.PL has been altered. You may need to run 'perl Build.PL' again.\n";
+}
+
+# This should have just enough arguments to be able to bootstrap the rest.
+my $build = Module::Build->resume (
+ properties => {
+ config_dir => '_build',
+ orig_dir => $orig_dir,
+ },
+);
+
+$build->dispatch;
Property changes on: trunk/build/HADES/Build
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/MANIFEST
===================================================================
--- trunk/build/HADES/MANIFEST (rev 0)
+++ trunk/build/HADES/MANIFEST 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,64 @@
+bin/hades-analyzed.pl
+bin/hades-analyzer.pl
+bin/hades-cfg-create.pl
+bin/hades-cfg-gui.pl
+bin/hades-cfg-test.pl
+bin/hades-check-data-ippm-csv.pl
+bin/hades-check-data-ippm.pl
+bin/hades-check-data-tracert.pl
+bin/hades-check-data.pl
+bin/hades-check-wrapper.pl
+bin/hades-cmd.pl
+bin/hades-cp.pl
+bin/hades-meta2db.pl
+bin/hades-mkmap.pl
+bin/hades-nagios-cfg-hosts.pl
+bin/hades-nagios-cfg-routes.pl
+bin/hades-nagios-check.pl
+bin/hades-plot.pl
+bin/hades-show-data.pl
+bin/hades-show-extremes.pl
+bin/hades-show-hosts.pl
+bin/hades-ssh.pl
+bin/hades-traceroute.pl
+bin/hadescfg.pl
+Build.PL
+etc/hades-analyzed.conf.example
+etc/hades-analyzed.init
+etc/hades-analyzed.sysconfig
+etc/hades-example.conf
+lib/Hades.pm
+lib/Hades/CGI.pm
+lib/Hades/Config.pm
+lib/Hades/Config/FileSaver.pm
+lib/Hades/Config/template.conf
+lib/Hades/Data.pm
+lib/Hades/Data/BWCTL.pm
+lib/Hades/Data/Finder.pm
+lib/Hades/Data/Finder_SQL.pm
+lib/Hades/Data/IPPM.pm
+lib/Hades/Data/IPPM_Aggregated.pm
+lib/Hades/Data/IPPM_Raw.pm
+lib/Hades/Data/IPPM_Sanitized.pm
+lib/Hades/Data/OWAMP.pm
+lib/Hades/Data/RIPE.pm
+lib/Hades/Data/Traceroute.pm
+lib/Hades/Data/Traceroute_Raw.pm
+lib/Hades/DB.pm
+lib/Hades/I18N.pm
+lib/Hades/I18N/de.pm
+lib/Hades/I18N/en.pm
+lib/Hades/Map.pm
+lib/Hades/Map/Circle.pm
+lib/Hades/Map/Coords.pm
+lib/Hades/Map/ImageMap.pm
+lib/Hades/Map/Star.pm
+lib/Hades/Plot.pm
+lib/Hades/Status.pm
+lib/Hades/Status/Matrix.pm
+lib/Hades/Status/Simple.pm
+lib/Hades/Status/Toby.pm
+lib/Hades/Warnings.pm
+MANIFEST This list of files
+Makefile.PL
+META.yml
Added: trunk/build/HADES/MANIFEST.SKIP
===================================================================
--- trunk/build/HADES/MANIFEST.SKIP (rev 0)
+++ trunk/build/HADES/MANIFEST.SKIP 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,44 @@
+# Avoid version control files.
+\bRCS\b
+\bCVS\b
+\bSCCS\b
+,v$
+\B\.svn\b
+\b_darcs\b
+
+# Avoid Makemaker generated and utility files.
+\bMANIFEST\.bak
+\bMakefile$
+\bblib/
+\bMakeMaker-\d
+\bpm_to_blib\.ts$
+\bpm_to_blib$
+\bblibdirs\.ts$ # 6.18 through 6.25 generated this
+
+# Avoid Module::Build generated and utility files.
+\bBuild$
+\b_build/
+
+# Avoid temp and backup files.
+~$
+\.old$
+\#$
+\b\.#
+\.bak$
+
+# Avoid Devel::Cover files.
+\bcover_db\b
+# Avoid configuration metadata file
+^MYMETA\.
+
+# Avoid Module::Build generated and utility files.
+\bBuild$
+\bBuild.bat$
+\b_build
+\bBuild.COM$
+\bBUILD.COM$
+\bbuild.com$
+^MANIFEST\.SKIP
+
+# Avoid archives of this distribution
+\bHADES-server-[\d\.\_]+
Added: trunk/build/HADES/MANIFEST.bak
===================================================================
--- trunk/build/HADES/MANIFEST.bak (rev 0)
+++ trunk/build/HADES/MANIFEST.bak 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,73 @@
+bin/hades-analyzed.pl
+bin/hades-analyzer.pl
+bin/hades-BWCTLcat.pl
+bin/hades-BWCTLconfigure
+bin/hades-BWCTLdisplayMDM.pl
+bin/hades-BWCTLdisplaytest.pl
+bin/hades-BWCTLhostconfig
+bin/hades-BWCTLparser.pl
+bin/hades-BWCTLwrapper
+bin/hades-cfg-create.pl
+bin/hades-cfg-gui.pl
+bin/hades-cfg-test.pl
+bin/hades-check-data-ippm-csv.pl
+bin/hades-check-data-ippm.pl
+bin/hades-check-data-tracert.pl
+bin/hades-check-data.pl
+bin/hades-check-wrapper.pl
+bin/hades-cmd.pl
+bin/hades-cp.pl
+bin/hades-meta2db.pl
+bin/hades-mkmap.pl
+bin/hades-nagios-cfg-hosts.pl
+bin/hades-nagios-cfg-routes.pl
+bin/hades-nagios-check.pl
+bin/hades-plot.pl
+bin/hades-RIPEanalyzer.pl
+bin/hades-RIPEcollector.pl
+bin/hades-RIPEconfig
+bin/hades-RIPEwrapper
+bin/hades-show-data.pl
+bin/hades-show-extremes.pl
+bin/hades-show-hosts.pl
+bin/hades-ssh.pl
+bin/hades-traceroute.pl
+bin/hadescfg.pl
+Build.PL
+etc/hades-analyzed.conf.example
+etc/hades-analyzed.init
+etc/hades-analyzed.sysconfig
+etc/hades-example.conf
+lib/Hades.pm
+lib/Hades/CGI.pm
+lib/Hades/Config.pm
+lib/Hades/Config/FileSaver.pm
+lib/Hades/Config/template.conf
+lib/Hades/Data.pm
+lib/Hades/Data/BWCTL.pm
+lib/Hades/Data/Finder.pm
+lib/Hades/Data/Finder_SQL.pm
+lib/Hades/Data/IPPM.pm
+lib/Hades/Data/IPPM_Aggregated.pm
+lib/Hades/Data/IPPM_Raw.pm
+lib/Hades/Data/IPPM_Sanitized.pm
+lib/Hades/Data/OWAMP.pm
+lib/Hades/Data/RIPE.pm
+lib/Hades/Data/Traceroute.pm
+lib/Hades/Data/Traceroute_Raw.pm
+lib/Hades/DB.pm
+lib/Hades/I18N.pm
+lib/Hades/I18N/de.pm
+lib/Hades/I18N/en.pm
+lib/Hades/Map.pm
+lib/Hades/Map/Circle.pm
+lib/Hades/Map/Coords.pm
+lib/Hades/Map/ImageMap.pm
+lib/Hades/Map/Star.pm
+lib/Hades/Plot.pm
+lib/Hades/Status.pm
+lib/Hades/Status/Matrix.pm
+lib/Hades/Status/Simple.pm
+lib/Hades/Status/Toby.pm
+lib/Hades/Warnings.pm
+MANIFEST This list of files
Added: trunk/build/HADES/META.yml
===================================================================
--- trunk/build/HADES/META.yml (rev 0)
+++ trunk/build/HADES/META.yml 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,86 @@
+---
+abstract: 'perl HADES implementation'
+author:
+  - 'DFN-Labor <>'
+build_requires:
+ Test::More: 0
+configure_requires:
+ Module::Build: 0.36
+generated_by: 'Module::Build version 0.3607'
+license: apache
+meta-spec:
+ url: http://module-build.sourceforge.net/META-spec-v1.4.html
+ version: 1.4
+name: HADES-server
+provides:
+ Configfile:
+ file: lib/Hades/Config.pm
+ Hades:
+ file: lib/Hades.pm
+ version: 0.01
+ Hades::CGI:
+ file: lib/Hades/CGI.pm
+ Hades::Config:
+ file: lib/Hades/Config.pm
+ Hades::Config::FileSaver:
+ file: lib/Hades/Config/FileSaver.pm
+ Hades::DB:
+ file: lib/Hades/DB.pm
+ Hades::Data:
+ file: lib/Hades/Data.pm
+ Hades::Data::BWCTL:
+ file: lib/Hades/Data/BWCTL.pm
+ Hades::Data::Finder:
+ file: lib/Hades/Data/Finder.pm
+ Hades::Data::Finder_SQL:
+ file: lib/Hades/Data/Finder_SQL.pm
+ Hades::Data::IPPM:
+ file: lib/Hades/Data/IPPM.pm
+ Hades::Data::IPPM_Aggregated:
+ file: lib/Hades/Data/IPPM_Aggregated.pm
+ Hades::Data::IPPM_Raw:
+ file: lib/Hades/Data/IPPM_Raw.pm
+ Hades::Data::IPPM_Sanitized:
+ file: lib/Hades/Data/IPPM_Sanitized.pm
+ Hades::Data::OWAMP:
+ file: lib/Hades/Data/OWAMP.pm
+ Hades::Data::RIPE:
+ file: lib/Hades/Data/RIPE.pm
+ Hades::Data::Traceroute:
+ file: lib/Hades/Data/Traceroute.pm
+ Hades::Data::Traceroute_Raw:
+ file: lib/Hades/Data/Traceroute_Raw.pm
+ Hades::I18N:
+ file: lib/Hades/I18N.pm
+ Hades::I18N::de:
+ file: lib/Hades/I18N/de.pm
+ Hades::I18N::en:
+ file: lib/Hades/I18N/en.pm
+ Hades::Map:
+ file: lib/Hades/Map.pm
+ Hades::Map::Circle:
+ file: lib/Hades/Map/Circle.pm
+ Hades::Map::Coords:
+ file: lib/Hades/Map/Coords.pm
+ Hades::Map::ImageMap:
+ file: lib/Hades/Map/ImageMap.pm
+ Hades::Map::ImageMap::Area:
+ file: lib/Hades/Map/ImageMap.pm
+ Hades::Map::Star:
+ file: lib/Hades/Map/Star.pm
+ Hades::Plot:
+ file: lib/Hades/Plot.pm
+ version: 0.01
+ Hades::Status:
+ file: lib/Hades/Status.pm
+ Hades::Status::Matrix:
+ file: lib/Hades/Status/Matrix.pm
+ Hades::Status::Simple:
+ file: lib/Hades/Status/Simple.pm
+ Hades::Status::Toby:
+ file: lib/Hades/Status/Toby.pm
+ Hades::Warnings:
+ file: lib/Hades/Warnings.pm
+resources:
+ license: http://apache.org/licenses/LICENSE-2.0
+version: 1.0
Added: trunk/build/HADES/MYMETA.yml
===================================================================
--- trunk/build/HADES/MYMETA.yml (rev 0)
+++ trunk/build/HADES/MYMETA.yml 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,87 @@
+---
+abstract: 'perl HADES implementation'
+author:
+  - 'DFN-Labor <>'
+build_requires:
+ Test::More: 0
+configure_requires:
+ Module::Build: 0.36
+dynamic_config: 0
+generated_by: 'Module::Build version 0.3607'
+license: apache
+meta-spec:
+ url: http://module-build.sourceforge.net/META-spec-v1.4.html
+ version: 1.4
+name: HADES-server
+provides:
+ Configfile:
+ file: lib/Hades/Config.pm
+ Hades:
+ file: lib/Hades.pm
+ version: 0.01
+ Hades::CGI:
+ file: lib/Hades/CGI.pm
+ Hades::Config:
+ file: lib/Hades/Config.pm
+ Hades::Config::FileSaver:
+ file: lib/Hades/Config/FileSaver.pm
+ Hades::DB:
+ file: lib/Hades/DB.pm
+ Hades::Data:
+ file: lib/Hades/Data.pm
+ Hades::Data::BWCTL:
+ file: lib/Hades/Data/BWCTL.pm
+ Hades::Data::Finder:
+ file: lib/Hades/Data/Finder.pm
+ Hades::Data::Finder_SQL:
+ file: lib/Hades/Data/Finder_SQL.pm
+ Hades::Data::IPPM:
+ file: lib/Hades/Data/IPPM.pm
+ Hades::Data::IPPM_Aggregated:
+ file: lib/Hades/Data/IPPM_Aggregated.pm
+ Hades::Data::IPPM_Raw:
+ file: lib/Hades/Data/IPPM_Raw.pm
+ Hades::Data::IPPM_Sanitized:
+ file: lib/Hades/Data/IPPM_Sanitized.pm
+ Hades::Data::OWAMP:
+ file: lib/Hades/Data/OWAMP.pm
+ Hades::Data::RIPE:
+ file: lib/Hades/Data/RIPE.pm
+ Hades::Data::Traceroute:
+ file: lib/Hades/Data/Traceroute.pm
+ Hades::Data::Traceroute_Raw:
+ file: lib/Hades/Data/Traceroute_Raw.pm
+ Hades::I18N:
+ file: lib/Hades/I18N.pm
+ Hades::I18N::de:
+ file: lib/Hades/I18N/de.pm
+ Hades::I18N::en:
+ file: lib/Hades/I18N/en.pm
+ Hades::Map:
+ file: lib/Hades/Map.pm
+ Hades::Map::Circle:
+ file: lib/Hades/Map/Circle.pm
+ Hades::Map::Coords:
+ file: lib/Hades/Map/Coords.pm
+ Hades::Map::ImageMap:
+ file: lib/Hades/Map/ImageMap.pm
+ Hades::Map::ImageMap::Area:
+ file: lib/Hades/Map/ImageMap.pm
+ Hades::Map::Star:
+ file: lib/Hades/Map/Star.pm
+ Hades::Plot:
+ file: lib/Hades/Plot.pm
+ version: 0.01
+ Hades::Status:
+ file: lib/Hades/Status.pm
+ Hades::Status::Matrix:
+ file: lib/Hades/Status/Matrix.pm
+ Hades::Status::Simple:
+ file: lib/Hades/Status/Simple.pm
+ Hades::Status::Toby:
+ file: lib/Hades/Status/Toby.pm
+ Hades::Warnings:
+ file: lib/Hades/Warnings.pm
+resources:
+ license: http://apache.org/licenses/LICENSE-2.0
+version: 1.0
Added: trunk/build/HADES/Makefile.PL
===================================================================
--- trunk/build/HADES/Makefile.PL (rev 0)
+++ trunk/build/HADES/Makefile.PL 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,6 @@
+# Note: this file was auto-generated by Module::Build::Compat version 0.3607
+ use Module::Build::Compat 0.02;
+
+ Module::Build::Compat->run_build_pl(args => \@ARGV);
+ require Module::Build;
+ Module::Build::Compat->write_makefile(build_class => 'Module::Build');
Added: trunk/build/HADES/_build/auto_features
===================================================================
--- trunk/build/HADES/_build/auto_features (rev 0)
+++ trunk/build/HADES/_build/auto_features 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,2 @@
+do{ my $x = {};
+$x; }
\ No newline at end of file
Added: trunk/build/HADES/_build/build_params
===================================================================
--- trunk/build/HADES/_build/build_params (rev 0)
+++ trunk/build/HADES/_build/build_params 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,135 @@
+do{ my $x = [
+ {
+ 'ARGV' => []
+ },
+ {},
+ {
+ 'verbose' => undef,
+ 'PL_files' => undef,
+ 'pollute' => undef,
+ 'bindoc_dirs' => [
+ 'blib/script'
+ ],
+ 'conflicts' => {},
+ 'recommends' => {},
+ 'scripts' => undef,
+ 'pod_files' => undef,
+ 'config_dir' => '_build',
+ 'dist_version' => '1.0',
+ 'sign' => undef,
+ 'recurse_into' => [],
+ 'build_bat' => 0,
+ 'extra_linker_flags' => [],
+ 'build_class' => 'Module::Build',
+ 'prereq_action_types' => [
+ 'requires',
+ 'build_requires',
+ 'conflicts',
+ 'recommends'
+ ],
+ 'base_dir' => '/home/unrz217/SA2T3-OPPD/trunk/build/HADES',
+ 'allow_mb_mismatch' => 0,
+ 'xs_files' => undef,
+ 'destdir' => undef,
+ 'metafile' => 'META.yml',
+ 'mb_version' => '0.3607',
+ 'use_tap_harness' => 0,
+ 'test_file_exts' => [
+ '.t'
+ ],
+ 'has_config_data' => undef,
+ 'dist_name' => 'HADES-server',
+ 'install_base' => undef,
+ 'module_name' => 'HADES',
+ 'recursive_test_files' => undef,
+ 'init_files' => {
+                        'etc/hades-analyzed.init' => 'init/init.d/hades-analyzed'
+ },
+ 'libdoc_dirs' => [
+ 'blib/lib',
+ 'blib/arch'
+ ],
+ 'perl' => '/usr/bin/perl',
+ 'dist_author' => [
+                        'DFN-Labor <>'
+ ],
+ 'bundle_inc' => [],
+ 'use_rcfile' => 1,
+ 'configure_requires' => {
+ 'Module::Build' => '0.36'
+ },
+ 'test_files' => undef,
+ 'dist_abstract' => 'perl HADES implementation',
+ 'create_readme' => undef,
+ 'prefix_relpaths' => {},
+ 'share_dir' => undef,
+ 'debug' => undef,
+ 'meta_merge' => {},
+ 'get_options' => {},
+ 'dist_version_from' => undef,
+ '_added_to_INC' => [],
+ 'auto_configure_requires' => 1,
+ 'create_license' => undef,
+ 'debugger' => undef,
+ 'html_css' => '',
+ 'cpan_client' => 'cpan',
+ 'bundle_inc_preload' => [],
+ 'build_elements' => [
+ 'PL',
+ 'support',
+ 'pm',
+ 'xs',
+ 'share_dir',
+ 'pod',
+ 'script',
+ 'etc',
+ 'init',
+ 'sysconfig'
+ ],
+ 'needs_compiler' => '',
+ 'orig_dir' => '/home/unrz217/SA2T3-OPPD/trunk/build/HADES',
+ 'include_dirs' => [],
+ 'installdirs' => 'site',
+ 'mymetafile' => 'MYMETA.yml',
+ 'create_makefile_pl' => 'small',
+ 'magic_number' => undef,
+ 'install_sets' => {},
+ 'tap_harness_args' => {},
+ 'sysconfig_files' => {
+                        'etc/hades-analyzed.sysconfig' => 'init/sysconfig/hades-analyzed'
+ },
+ 'install_base_relpaths' => {
+ 'lib' => [
+ 'lib'
+ ]
+ },
+ 'meta_add' => {},
+ 'create_packlist' => 1,
+ 'requires' => {},
+ 'install_path' => {
+ 'init' => '/etc',
+ 'etc' => 'etc'
+ },
+ 'pm_files' => undef,
+ 'etc_files' => {
+ 'etc/hades-example.conf' => 'etc/hades.conf',
+                        'etc/hades-analyzed.conf.example' => 'etc/hades-analyzed.conf'
+ },
+ 'quiet' => undef,
+ 'extra_compiler_flags' => [],
+ 'script_files' => undef,
+ 'build_script' => 'Build',
+ 'original_prefix' => {},
+ 'c_source' => undef,
+ 'program_name' => undef,
+ 'autosplit' => undef,
+ 'license' => 'apache',
+ 'build_requires' => {
+ 'Test::More' => 0
+ },
+ 'config' => undef,
+ 'blib' => 'blib',
+ 'prefix' => undef
+ }
+ ];
+$x; }
\ No newline at end of file
Added: trunk/build/HADES/_build/cleanup
===================================================================
--- trunk/build/HADES/_build/cleanup (rev 0)
+++ trunk/build/HADES/_build/cleanup 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,8 @@
+do{ my $x = {
+ 'perfSONAR-*' => 1,
+ 'NMWG-*' => 1,
+ 'HADES-server-1.0' => 1,
+ 'MANIFEST.SKIP' => 1,
+ 'blib' => 1
+ };
+$x; }
\ No newline at end of file
Added: trunk/build/HADES/_build/config_data
===================================================================
--- trunk/build/HADES/_build/config_data (rev
0)
+++ trunk/build/HADES/_build/config_data 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,2 @@
+do{ my $x = {};
+$x; }
\ No newline at end of file
Added: trunk/build/HADES/_build/features
===================================================================
--- trunk/build/HADES/_build/features (rev 0)
+++ trunk/build/HADES/_build/features 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,2 @@
+do{ my $x = {};
+$x; }
\ No newline at end of file
Added: trunk/build/HADES/_build/magicnum
===================================================================
--- trunk/build/HADES/_build/magicnum (rev 0)
+++ trunk/build/HADES/_build/magicnum 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1 @@
+985903
\ No newline at end of file
Added: trunk/build/HADES/_build/notes
===================================================================
--- trunk/build/HADES/_build/notes (rev 0)
+++ trunk/build/HADES/_build/notes 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,2 @@
+do{ my $x = {};
+$x; }
\ No newline at end of file
Added: trunk/build/HADES/_build/prereqs
===================================================================
--- trunk/build/HADES/_build/prereqs (rev 0)
+++ trunk/build/HADES/_build/prereqs 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,9 @@
+do{ my $x = {
+ 'build_requires' => {
+ 'Test::More' => 0
+ },
+ 'conflicts' => {},
+ 'requires' => {},
+ 'recommends' => {}
+ };
+$x; }
\ No newline at end of file
Added: trunk/build/HADES/_build/runtime_params
===================================================================
--- trunk/build/HADES/_build/runtime_params (rev
0)
+++ trunk/build/HADES/_build/runtime_params 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,2 @@
+do{ my $x = {};
+$x; }
\ No newline at end of file
Added: trunk/build/HADES/bin/hades-analyzed.pl
===================================================================
--- trunk/build/HADES/bin/hades-analyzed.pl (rev 0)
+++ trunk/build/HADES/bin/hades-analyzed.pl 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,1234 @@
+#!/usr/bin/perl
+
+# See embedded POD below for further information
+
+# TODO
+# - Detect and remove orphaned PID file! Or should it be done in init scripts?
+# - There could perhaps be more messages for loglevels info and debug
+# - Enhance analyzer with proper signal handling to make graceful shutdown on
+# SIGUSR1 more useful.
+
+use strict;
+use warnings;
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+# Commen modules for all Hades/oppd daemons:
+use locale;
+use POSIX qw(setsid setpgid :sys_wait_h);
+use Log::Dispatch;
+use Log::Dispatch::File;
+use Log::Dispatch::Syslog;
+use Log::Dispatch::Screen;
+use Getopt::Long 2.32 qw(:config auto_help auto_version bundling);
+use Pod::Usage;
+use Config::General;
+# DateTime not needed by now, but this would be necessary, because Hades.pm is
+# NOT loaded.
+#use DateTime;
+#use DateTime::Locale;
+#BEGIN {
+# if (DateTime::Locale->load(setlocale(LC_TIME))) {
+# DateTime->DefaultLocale(setlocale(LC_TIME));
+# }
+#}
+
+# Modules for this daemon:
+use File::Path;
+use IPC::Run qw(start pump finish timeout);
+
+
+#
+# Important variables that should be available and initialised before the
+# (possible) execution of the END block
+#
+my (
+ $proc_type, $pidfile_ok, $log, $log_prefix, $shutdown_gracefully,
+ $shutting_down
+);
+INIT {
+  $proc_type = "main"; # Some code is executed by all childrens that fork and
+ # do not exec afterwards. So we have to know
+ # what to do exactly.
+ # See e.g. END block and signal handlers for possible
+ # values.
+ $pidfile_ok = 0; # Care about existing pidfile in END
+ $log = Log::Dispatch->new();
+  # We also need the Log::Dispatch object for option verification quite early
+ $log_prefix = ""; # Prepended to log message if set. This is intended for
+ # child processes and should not be "missused"!
+ $shutdown_gracefully = 0; # END called without signal is like SIGTERM !!
+ #TODO Use another default?
+ $shutting_down = 0; # This is set directly after entering the END block.
+ # Can be used to determine whether the process is
+ # going down at the moment. Important e.g. in signal
+ # handlers!
+}
+
+
+#
+# Parse Configuration (commandline and file)
+#
+
+my (
+ $configfile, $noconfig,
+ $detach, $syslog, $logfile, $nologfile, $pidfile, $nopidfile,
+ $loglevel, $verbose, $syslog_host, $syslog_ident, $syslog_facility,
+ $analyzer, @domains, $timer_warn, $timer_kill,
+ $polling_sleeptime, $max_polling_errors, $retry_start_delay,
+ $day_begin_threshold,
+);
+GetOptions(
+ "config=s" => \$configfile,
+ "noconfig" => \$noconfig,
+ "detach|D!" => \$detach,
+ "logfile:s" => \$logfile,
+ "nologfile" => \$nologfile,
+ "pidfile:s" => \$pidfile,
+ "nopidfile" => \$nopidfile,
+ "syslog!" => \$syslog,
+ "syslog-host=s" => \$syslog_host,
+ "syslog-ident=s" => \$syslog_ident,
+ "syslog-facility=s" => \$syslog_facility,
+ "loglevel=s" => \$loglevel,
+ "verbose|v" => \$verbose,
+ "analyzer=s" => \$analyzer,
+  "domain=s" => \@domains,
+ "timer-warn=i" => \$timer_warn,
+ "timer-kill=i" => \$timer_kill,
+ "polling-sleeptime=i" => \$polling_sleeptime,
+ "max-polling-errors=i" => \$max_polling_errors,
+ "retry-start-delay=i" => \$retry_start_delay,
+ "day-begin-threshold=i" => \$day_begin_threshold,
+) or pod2usage(2);
+
+# Determine and load config file
+my %Config = ();
+my $Config;
+if ($noconfig) {
+ $configfile = undef;
+} else {
+ $configfile ||= "$FindBin::RealBin/../etc/analyzed.conf";
+ $Config = Config::General->new(
+ -ConfigFile => $configfile,
+ -ApacheCompatible => 1,
+    -AutoTrue => 1, # Could bring in some trouble, but it is really nice ;)
+ -CComments => 0, # Parsing is obviously broken in 2.36!
+ # Comments are found everywhere...
+ );
+ %Config = $Config->getall;
+}
+
+
+#
+# Calculate options
+# First not "undef" value is used.
+# Order: command line, config file, default
+#
+$detach = get_opt($detach, $Config{detach}, 1);
+$nologfile = get_opt($nologfile, 0); # No nologfile entry in config file!
+if ($nologfile) {
+ $logfile = undef;
+} else {
+ $logfile = get_opt($logfile, $Config{logfile}, 0);
+ if (!$logfile && $logfile ne "") {
+ # logfile disabled
+ $logfile = undef;
+ } elsif ($logfile eq "1" || $logfile eq "") {
+ # logfile enabled in configuration file or via --logfile without value
+ $logfile = "/var/log/hades-analyzed.log";
+ }
+}
+$nopidfile = get_opt($nopidfile, 0); # No nopidfile entry in config file!
+if ($nopidfile) {
+ $pidfile = undef;
+} else {
+ $pidfile = get_opt($pidfile, $Config{pidfile}, 1);
+ if (!$pidfile && $pidfile ne "") {
+ # pidfile disabled
+ $pidfile = undef;
+ } elsif ($pidfile eq "1" || $pidfile eq "") {
+ # pidfile enabled in configuration file or via --pidfile without value
+ $pidfile = "/var/run/hades-analyzed.pid";
+ }
+}
+$syslog = get_opt($syslog, $Config{syslog}, 0);
+$syslog_host = get_opt($syslog_host, $Config{'syslog-host'}, "");
+$syslog_ident =
+ get_opt($syslog_ident, $Config{'syslog-ident'}, "hades-analyzed");
+$syslog_facility =
+ get_opt($syslog_facility, $Config{'syslog-facility'}, "daemon");
+$loglevel = get_opt($loglevel, $Config{loglevel}, "notice");
+$verbose = get_opt($verbose, 0); # No verbose entry in config file!
+if ($verbose) {
+ $loglevel = "info";
+} else {
+ pod2usage( { -message => "Invalid log level: $loglevel",
+ -exitval => 2 } ) unless $log->level_is_valid($loglevel);
+}
+$analyzer =
+  get_opt($analyzer, $Config{analyzer}, "$FindBin::RealBin/hades-analyzer.pl");
+if (! open CHECKBIN, "$analyzer --version 2>&1 |") {
+ die "Could not execute \"$analyzer\": $!\n";
+} elsif (! <CHECKBIN>) { # elsif (! <CHECKBIN> =~ /TODO/) {
+ die "\"$analyzer\" is not a Hades analyzer\n";
+}
+close CHECKBIN;
+unless (@domains) {
+ # No domains on command line -> try configuration file
+ if (exists $Config{domain}) {
+ if (ref($Config{domain}) eq "ARRAY") {
+ @domains = @{$Config{domain}};
+ } else {
+ @domains = ( $Config{domain} );
+ }
+ } else {
+ unless (@domains) {
+ # No domain specified anywhere -> use default
+ @domains = ( "$FindBin::RealBin/../etc/hades.conf" );
+ }
+ }
+}
+my %domains = ();
+foreach (@domains) {
+ my ($domain, @params) = split; # Old: /^(\S+)(?:\s(.*))?$/
+ $_ = $domain; # Replace full string in @domains with only the domain name
+ $domains{$domain} = {
+ name => $domain, # very useful later on
+    params => \@params,
+ };
+}
+$timer_warn = get_opt($timer_warn, $Config{'timer-warn'}, 3600); # seconds
+$timer_kill = get_opt($timer_kill, $Config{'timer-kill'}, 10800); # seconds
+$polling_sleeptime = get_opt(
+ $polling_sleeptime, $Config{'polling-sleeptime'}, 1); # seconds
+$max_polling_errors = get_opt(
+ $max_polling_errors, $Config{'max-polling-errors'}, 10);
+$retry_start_delay = get_opt(
+ $retry_start_delay, $Config{'retry-start-delay'}, 600); # seconds
+$day_begin_threshold = get_opt(
+ $day_begin_threshold, $Config{'day-begin-threshold'}, 60); # seconds
+
+my $gracetime = 30; # Usefull as a parameter?
+
+
+#
+# Start logging ($log already initialised above)
+#
+
+if (defined $logfile) {
+ eval {
+ use locale;
+ use POSIX qw(locale_h strftime);
+ # "use locale", POSIX::strftime and localtime faster than DateTime
+ # and this has quite an impact on debug loging. Detected via SmokePing:
+ # http://perfsonar.acad.bg/smokeping
+ #use DateTime;
+ #use DateTime::Locale;
+ #BEGIN {
+ # if (DateTime::Locale->load(setlocale(LC_TIME))) {
+ # DateTime->DefaultLocale(setlocale(LC_TIME));
+ # }
+ #}
+ $log->add(
+ Log::Dispatch::File->new(
+ name => 'file',
+ min_level => $loglevel,
+ filename => $logfile,
+ mode => 'append', close_after_write => 1, autoflush => 1,
+ callbacks => sub {
+        my %p=@_;
+ $p{message} = "$log_prefix: $p{message}" if $log_prefix;
+        $p{message} = strftime("%FT%T",localtime) . " $p{level} $p{message}";
+ #DateTime->now(time_zone=>"local")->strftime("%c") .
+ return "$p{message}\n";
+ },
+ )
+ );
+ };
+  die "Cannot write to log file '$logfile': $@\n" if $@;
+}
+if ($syslog) {
+ eval {
+ my $syslog_socket = 'unix';
+ if ($syslog_host) {
+ $Sys::Syslog::host = $syslog_host;
+ $syslog_socket = 'inet';
+ }
+ $log->add(
+ Log::Dispatch::Syslog->new(
+ name => 'syslog',
+ min_level => $loglevel,
+ ident => "$syslog_ident",
+ facility => "$syslog_facility",
+ socket => "$syslog_socket",
+ logopt => "ndelay",
+ callbacks => sub {
+        my %p=@_;
+ $p{message} = "$log_prefix: $p{message}" if $log_prefix;
+ return "$p{message}\n";
+ },
+ #mode => 'append', close_after_write => 0, autoflush => 1,
+ )
+ );
+ };
+  die "Cannot write to syslog: $@\n" if $@;
+}
+unless ($detach) {
+ $log->add(
+ Log::Dispatch::Screen->new(
+ name => 'screen',
+ min_level => $loglevel,
+ stderr => 1,
+ callbacks => sub {
+        my %p=@_;
+ $p{message} = "$log_prefix: $p{message}" if $log_prefix;
+ $p{message} = "$p{level}: $p{message}";
+ return "$p{message}\n";
+ },
+ )
+ );
+}
+
+# More flexible die:
+# Put error into Log and afterwards die with same message.
+# Also handy, because in the following code $@ is undef in die call:
+# $log->error($@); die $@;
+$SIG{__DIE__} = sub {
+ die @_ if $^S; # Ignore dies from evals
+ my $logmsg = join " - ", @_;
+ chomp $logmsg; # No new line for Log::Dispatch !
+ # We should only be called with initialised $log, but we can be a bit
+ # more friendly by only using it if it was initialised:
+  $log->error($logmsg) if defined $log && UNIVERSAL::isa($log,'Log::Dispatch');
+ die @_;
+};
+
+# More flexible warn:
+# Put error into Log and afterwards warn with same message.
+$SIG{__WARN__} = sub {
+ my $logmsg = join " - ", @_;
+ chomp $logmsg; # No new line for Log::Dispatch !
+ # We should only be called with initialised $log, but we can be a bit
+ # more friendly by only using it if it was initialised:
+ $log->warning($logmsg)
+ if defined $log && UNIVERSAL::isa($log,'Log::Dispatch');
+ warn @_;
+};
+
+
+#
+# Daemonize
+#
+
+if ($detach) {
+ # Fork once, and let the parent exit.
+ my $pid = fork;
+ if ($pid) { $proc_type = "dummy"; exit; }
+ defined($pid) or die "Could not fork: $!\n";
+
+ # Dissociate from the controlling terminal that started us and stop being
+ # part of whatever process group we had been a member of.
+ setsid() or die "Cannot start a new session: $!\n";
+
+  # In Proc::Daemon there is a second fork executed with the following comment:
+ # "Forks another child process and exits first child. This prevents the
+ # potential of acquiring a controlling terminal."
+ # This is nowhere else mentioned! Neither in Perl nor standard UNIX
+ # documentation.
+  # IMPORTANT: If you put a second fork here, the process group is most likely
+ # not correct for sending signals e.g. in the END block!
+
+ # chdir and set umask
+ chdir '/' or die "Cannot chdir to '/': $!\n";
+ #umask 0;
+
+ setup_pidfile() if defined $pidfile;
+ # Do it before closing file handles! We need the error messages!
+
+ # Close default file handles
+ close STDIN or die "Could not close STDIN: $!\n";
+ close STDOUT or die "Could not close STDOUT: $!\n";
+ close STDERR or die "Could not close STDERR: $!\n";
+ # Reopen stderr, stdout, stdin to /dev/null
+ open(STDIN, "</dev/null");
+ open(STDOUT, ">/dev/null");
+ open(STDERR, ">/dev/null");
+} else {
+ setpgid(0,0) or die "Cannot set process group id: $!\n";
+ setup_pidfile() if defined $pidfile;
+}
+
#
# Signal handlers
#

# die on typical signals
my $time_to_die = 0;    # set by SIGUSR2: exit only after all analyzers finished
$SIG{INT} = $SIG{TERM} = sub {
    # Immediate shutdown: END kills the analyzers with minimal grace time.
    $log->notice("Caught SIG$_[0] - initiating shutdown");
    $shutdown_gracefully = 0;
    exit 1;
    # See END {} for shutdown sequence
};
$SIG{USR1} = sub {
    # Graceful shutdown with timeout
    $log->notice("Caught SIGUSR1 - initiating gracefull shutdown");
    $shutdown_gracefully = $gracetime;
    exit 1;
    # See END {} for shutdown sequence
};
$SIG{USR2} = sub {
    # Graceful shutdown WITHOUT timeout -> Possibly blocking forever!
    $log->notice("Caught SIGUSR2 - initiating gracefull shutdown");
    #$shutdown_gracefully = -1; # Not used! We are not exiting at once!
    # When we reach END, all children have exited.
    $time_to_die = 1;    # main loop exits once no analyzer is running any more
};
$SIG{HUP} = sub {
    # Restart all analyzers; they reread their configuration on start.
    # NOTE: the daemon's own configuration file is NOT reread.
    $log->notice("SIGHUP caught - restarting analyzers");
    foreach my $domain (@domains) {
        # @domains can be filled, but harnesses are not (yet) initialised
        next unless exists($domains{$domain}->{harness})
            && defined($domains{$domain}->{harness})
            && UNIVERSAL::isa($domains{$domain}->{harness}, "IPC::Run");
        $log->info("Trying to terminate analyzer for \"$domain\"");
        kill_analyzer($domains{$domain});
        $domains{$domain}->{harness} = undef; # Mark for restart
    }
};
$SIG{PIPE} = 'IGNORE';
$SIG{TSTP} = $SIG{TTOU} = $SIG{TTIN} = 'IGNORE'; # ignore tty signals
$SIG{CHLD} = \&REAPER; # Care about child processes. See below.

#
# Inform that everything looks good
#

$log->notice("hades-analyzed started");
$log->info("PID $$ written to $pidfile") if defined $pidfile;
+
+
#
# Loop doing the work. Only exiting on signals
#

while (1) {

    my @running_domains = (); # Currently running domains for checks after
                              # going through all domains.
    foreach my $domain (@domains) {
        sleep $polling_sleeptime; # Be nice
        my $d = $domains{$domain};

        # Start analyzer, if not running:
        unless (
            exists($d->{harness}) && defined($d->{harness}) &&
            UNIVERSAL::isa($d->{harness}, "IPC::Run")
            # This may help to recover from unforeseen errors and states of
            # the daemon
        ) {
            # Harness not started or has to be restarted
            next if $time_to_die; # Do not restart if we are about to exit!
            start_analyzer($d) or next; # next if not started!
        }

        # pump_nb() and pumpable() may raise an exception so we just put
        # everything into eval to prevent our daemon from die-ing. Exceptions
        # in the other subs should all be handled separately, but it's most
        # likely a good idea to catch them accidentally instead of die-ing ...
        eval {
            if ($d->{harness}->pumpable) {
                # Now we know that the analyzer is not (really) finished yet!
                push @running_domains, $domain;
                # Pull output from analyzer (non-blocking)
                $d->{harness}->pump_nb;
                process_buffers($d);
                check_longrun($d);
            } else {
                # Obviously analyzer has finished -> finish and trigger restart
                finish_analyzer($d);
            }
        };
        if ($@) {
            $log->error("Exception occured " .
                "while trying to pump output from analyzer for \"$d->{name}\": " . $@);
            $d->{pump_error_count}++;
            if ($d->{pump_error_count} > $max_polling_errors) {
                # Analyzer seems to hang -> kill it and mark for restart
                $log->error("Unable to pump analyzer for \"$d->{name}\" " .
                    "for the last $d->{pump_error_count} times - Killing analyzer");
                kill_analyzer($d);
                $d->{harness} = undef; # Mark for restart
            }
        } else {
            # A successful poll resets the error counter.
            $d->{pump_error_count} = 0;
        }
    }
    $log->debug("Currently running domains: " . join " ", @running_domains);
    if ($time_to_die && $#running_domains < 0) {
        # SIGUSR2 was received and all analyzers have finished by themselves.
        $log->notice("All analyzers exited - Exiting");
        exit 0;
    }
}


die "Internal error: This code should not be reached!\n";
+
+
+
+### END OF MAIN ###
+
+
+
# Return the first argument that has a defined value, or undef if none does.
sub get_opt {
    for my $candidate (@_) {
        return $candidate if defined $candidate;
    }
    return undef;
}
+
+
END {
    # END could be executed without most of the initialisation from above
    # already done!
    # At least the following variables should be already available via the
    # INIT block (other should be considered to be possibly undef or empty):
    # $proc_type, $pidfile_ok, $log, $log_prefix, $shutdown_gracefully,
    # $shutting_down
    # Keep this also in mind for subs called in the code below!
    $shutting_down = 1;
    return if $proc_type eq "dummy"; # Do not execute anything below
    my $exitcode = $?; # Save $? (the cleanup below may clobber it)
    $log->info("Starting shutdown sequence");
    # Try to kill analyzers before exiting:
    foreach my $domain (@domains) {
        # @domains can be filled, but harnesses are not (yet) initialised
        next unless exists($domains{$domain}->{harness})
            && defined($domains{$domain}->{harness})
            && UNIVERSAL::isa($domains{$domain}->{harness}, "IPC::Run");
        $log->info("Trying to terminate analyzer for \"$domain\"");
        # A grace time of 0 would fall through to kill_analyzer's default,
        # so use 1 second when no graceful shutdown was requested.
        kill_analyzer(
            $domains{$domain}, $shutdown_gracefully ? $shutdown_gracefully : 1
        );
    }
    # Clean up PID file:
    unlink $pidfile if $pidfile_ok && -e $pidfile;
    $log->notice("Exiting");
    $? = $exitcode; # Restore $?
}
+
#
# setup pid file
#
# Writes the current PID into $pidfile and sets $pidfile_ok so END knows it
# may unlink the file. Dies if a non-empty PID file already exists (another
# instance is presumably running) or on any write error.
sub setup_pidfile {
    die("PID file ($pidfile) contains pid! Already running?\n")
        if -e $pidfile && -s $pidfile;
    # BUGFIX: use a checked three-argument open with a lexical filehandle
    # instead of the unsafe two-argument bareword form, and check print too
    # (write errors could previously go unnoticed until close).
    open(my $pidfh, '>', $pidfile)
        or die("Could not write PID file ($pidfile): $!\n");
    print {$pidfh} "$$\n"
        or die("Could not write PID file ($pidfile): $!\n");
    $pidfile_ok = 1;
    close($pidfh)
        or die("Could not write PID file ($pidfile): $!\n");
}
+
+
#
# This is our SIGCHLD handler. Not really doing important things...
#
# Reaps every exited child without blocking and logs its PID and exit code.
sub REAPER { # see also 'man perlipc'
    local $!; # waitpid() and others may overwrite current error
    my $child;
    while (($child = waitpid(-1, WNOHANG)) > 0) {
        my $suffix = $? ? " with exit code $?" : "";
        $log->debug("Child process $child exited" . $suffix);
    }
    # Reinstall the handler for unreliable SysV signal semantics.
    $SIG{CHLD} = \&REAPER;
}
+
+
# Return (year, month, day) in UTC for the given epoch time.
# Without an argument (or with undef) the current time is used.
# Year is full (e.g. 2011), month is 1-12, day is 1-31.
sub get_ymd {
    my ($when) = @_;
    $when = time unless defined $when;
    my @tm = gmtime($when);
    my ($day, $month, $year) = @tm[3, 4, 5];
    return ($year + 1900, $month + 1, $day);
}
+
+
# Everything is UTC and we are using Epoch arithmetic to speed up things and
# avoid loading DateTime.
#TODO Better use IPC::Open3 ???? Also look at waitpid in perlfunc man page !!!
#
# Start the hades-analyzer.pl process for one domain ($d = per-domain state
# hash). Handles the day transition (one final run for yesterday after
# midnight) and a retry delay after failed starts.
# Returns 1 on success, undef when the start was skipped or failed.
sub start_analyzer {
    my $d = shift;
    my @options = @_;

    my $epoch = time;
    my $eday = int($epoch / 86400); # 86400 = 24*60*60
    my $seconds_of_day = $epoch % 86400;
    if ($seconds_of_day < $day_begin_threshold) {
        # We wait a small grace time to let the new day begin really
        # everywhere (the measurement boxes' clocks may lag slightly).
        my $delay = $day_begin_threshold - $seconds_of_day;
        $log->debug("Delaying start of new day for \"$d->{name}\"" .
            " for $delay seconds");
        sleep $delay;
        # Some time has passed... Get current time.
        $epoch = time;
        $eday = int($epoch / 86400); # 86400 = 24*60*60
    }
    my ($year,$mon,$day);

    if ($d->{retry_start}) {
        # BUGFIX: the comparison was inverted ("$d->{retry_start} < $epoch"
        # returned early). That skipped exactly the retries whose delay had
        # already expired and retried while the delay was still pending.
        if ($epoch < $d->{retry_start}) {
            # Retry delay has not expired yet.
            $log->debug("Not re-trying to start analyzer for \"$d->{name}\" yet");
            return;
        } else {
            $log->debug("Re-trying to start analyzer for \"$d->{name}\"");
            $d->{retry_start} = 0;
        }
    }

    if (defined($d->{start_day}) && $eday > $d->{start_day}) {
        # New day since last run
        # -> Finish yesterday (there once was --yesterday in data2www.pl)
        unshift @options, "--gzip", "--sleeptime=0", "--copy-config";
        ($year,$mon,$day) = get_ymd($d->{start_time});
        $log->info("Starting analyzer for \"$d->{name}\" for yesterday");
    } else {
        ($year,$mon,$day) = get_ymd($epoch);
        $log->info("Starting analyzer for \"$d->{name}\" for today");
    }

    my ($in, $out, $err);
    $d->{timer_warned} = 0;      # reset "running too long" warning flag
    $d->{out_buffer} = "";       # partial-line buffer for captured STDOUT
    $d->{pump_error_count} = 0;
    my @command = (
        $analyzer, "--config=$d->{name}", @{$d->{params}}, @options,
        "--year=$year", "--month=$mon", "--day=$day"
    );
    $log->debug("Executing command: " . join(" ", @command));
    eval {
        $d->{harness} = start(\@command, \$in, \$out, \$err);
    };
    if ($@) {
        $log->error("Exception occured " .
            "while trying to run analyzer for \"$d->{name}\": " . $@);
        $log->notice("Re-trying to run analyzer for \"$d->{name}\" " .
            "in $retry_start_delay seconds");
        $d->{retry_start} = $epoch + $retry_start_delay;
        $d->{harness} = undef;
        return;
    }
    # Only set the following parameters if start was successful!
    $d->{in} = \$in; $d->{out} = \$out; $d->{err} = \$err;
    $d->{start_time} = $epoch;
    $d->{start_day} = $eday;
    return 1;
}
+
+
#
# Send output buffers to $log, if necessary.
#
# Splits the captured STDOUT/STDERR of the harness into complete lines (see
# _process_buffer_helper) and logs each line with a suitable log level.
# Known harmless analyzer messages are filtered out.
sub process_buffers {
    my $d = shift;
    if (${$d->{out}}) {
        my @out = _process_buffer_helper($d,"out");
        foreach my $line (@out) {
            #TODO more sophisticated parsing based on a more sophisticated
            # output from hades-analyzer.pl :)
            my $loglevel = "warning";
            if (
                $line =~ /^data.*No valid data in raw data file/ ||
                $line =~ /^Error opening data file/
            ) {
                next; # TODO debug or info ?
            }# elsif ($line =~ /Something really bad happened/) {
            # $loglevel = "error";
            #}
            $log->log( level => $loglevel, message => "$d->{name}: $line" );
        }
    }
    # BUGFIX: dereference the scalar ref like in the "out" branch above.
    # "$d->{err}" tested the reference itself, which is always true, so a
    # pending partial line in err_buffer was flushed although no new output
    # had arrived.
    if (${$d->{err}}) {
        my @err = _process_buffer_helper($d,"err");
        foreach my $line (@err) {
            #TODO more sophisticated parsing based on a more sophisticated
            # output from hades-analyzer.pl :)
            my $loglevel = "warning";
            if ($line =~ /^FIXME/) {
                # Known bugs are not worth a warning
                $loglevel = "notice";
            }
            $log->log( level => $loglevel, message => "$d->{name}: $line" );
        }
    }
}
+
# Split the captured output of stream $src ("out" or "err") into complete
# lines and return them. A trailing partial line (no final newline) is kept
# in $d->{out_buffer}/$d->{err_buffer} and prepended to the data of the next
# call.
#
# BUGFIX: the old code stored the NEW partial line into the buffer before
# prepending the OLD buffered fragment, so a pending fragment was lost and
# the new fragment glued onto the wrong line. It also used truthiness tests,
# which dropped a partial line consisting of just "0", and concatenated
# undef when the split produced no fields.
sub _process_buffer_helper {
    my ($d, $src) = @_;
    my $buffer = $src . "_buffer";
    my $data = ${$d->{$src}};
    ${$d->{$src}} = ""; # empty the pump buffer
    # Prepend the partial line left over from the previous pump (if any).
    if (defined $d->{$buffer} && length $d->{$buffer}) {
        $data = $d->{$buffer} . $data;
        $d->{$buffer} = "";
    }
    # LIMIT -1 keeps a trailing empty field, so we can distinguish
    # "ends with newline" from "ends mid-line".
    my @lines = split /\n/, $data, -1;
    return unless @lines;
    if (length $lines[-1]) {
        # No '\n' at the end of the string: stash the partial line for the
        # next pump instead of emitting it now.
        $d->{$buffer} = $lines[-1];
    }
    pop @lines; # Remove empty or buffered last entry
    return @lines;
}
+
#
# Kill a (perhaps) running analyzer
#
# Sends SIGTERM and, after the optional grace time in seconds (default 5),
# SIGKILL via IPC::Run::kill_kill. Returns 1 on success, undef when
# kill_kill raised an exception.
#TODO May it survive somehow?
sub kill_analyzer {
    my $d = shift;
    my $gracetime = shift || 5; # NOTE: shadows the global $gracetime
    eval {
        # IMPORTANT: The documentation of IPC::Run is inconsistent with actual
        # code! "undef" means that TERM was successful. "1" means KILL was
        # necessary.
        if ($d->{harness}->kill_kill(grace => $gracetime)) {
            $log->error("SIGKILL was necessary to quit analyzer for \"$d->{name}\"");
        } else {
            $log->debug("Analyzer terminated successfully");
        }
    };
    if ($@) {
        $log->error("Exception occured " .
            "while trying to kill analyzer for \"$d->{name}\": " . $@);
        #TODO Has it survived? What can we do about it?
        return;
    }
    return 1;
}
+
+
#
# Handle long running analyzers
#
# Warns once when an analyzer runs longer than $timer_warn seconds and kills
# it (marking it for restart) after $timer_kill seconds.
# Returns: undef below the warn threshold or after a kill, 2 on the first
# warning, 1 when the warning was already issued.
sub check_longrun {
    my $d = shift;
    # Take a single time() snapshot: the original called time twice, so the
    # warn and kill comparisons could be based on different moments.
    my $elapsed = time - $d->{start_time};
    if ($elapsed >= $timer_warn) {
        if ($elapsed >= $timer_kill) {
            $log->warning("analyzer for \"$d->{name}\" " .
                "running longer than $timer_kill seconds - KILLING");
            kill_analyzer($d);
            $d->{harness} = undef; # Mark for restart
            return;
        } elsif (!$d->{timer_warned}) {
            # Warn only once if analyzer is running longer than $timer_warn
            $log->warning("analyzer for \"$d->{name}\" " .
                "running longer than $timer_warn seconds");
            $d->{timer_warned} = 1;
            return 2;
        } else {
            return 1;
        }
    }
    return;
}
+
#
# Obviously analyzer is finished -> finish and trigger restart
#
# Reaps the harness via finish(), logs abnormal exit states and marks the
# domain for restart. Returns 1.
sub finish_analyzer {
    my $d = shift;

    $log->info("Analyzer for \"$d->{name}\" finished");

    my $result = 0;
    eval {
        unless ($d->{harness}->finish) {
            # There should be only one result code and this one should be != 0
            $result = $d->{harness}->full_result;
        }
    };
    if ($@) {
        $log->error("Exception occured " .
            "while trying to finish analyzer for \"$d->{name}\": " . $@);
        kill_analyzer($d);
    }
    unless ($result eq "unknown result, unknown PID" || $result == 0) {
        #TODO I really don't know what this "unknown result, unknown PID"
        # really is about! Should we care about it?
        # $result is a wait(2)-style status word: low 7 bits = signal number,
        # bit 8 = coredump flag, high byte = exit value.
        if ($result & 127) {
            $log->warning(
                sprintf("Analyzer died with signal %d, %s coredump\n",
                    ($result & 127), ($result & 128) ? 'with' : 'without')
            );
        } else {
            $log->warning(
                sprintf("Analyzer exited with value %d\n", $result >> 8)
            );
        }
    }
    # finish is pumping the rest, but since pumpable is already false,
    # there should be nothing left. Just to be sure, we check and process
    # it once more:
    process_buffers($d);
    $d->{harness} = undef; # Mark for restart
    return 1;
}
+
+
+
+__END__
+
+
+
+=head1 NAME
+
+hades-analyzed.pl - Daemon for data retrieval and analysis
+
+=head1 SYNOPSIS
+
+B<hades-analyzed.pl> [OPTIONS]
+
+
+
+=head1 DESCRIPTION
+
+B<hades-analyzed.pl> is a daemon that is retrieving measurement data from
+the measurement boxes and analysing them. The real hard work is done by
+B<hades-analyzer.pl> and B<hades-analyzed.pl> is only doing supervisor tasks:
+
+=over
+
+=item *
+
+Can run "forever" as daemon providing the typical UNIX style control
+mechanisms like a PID file or Syslog or file logging.
+
+=item *
+
+Handling day to day transition: Triggering one more round for the day that
has
+just passed and let the analyzer compress the data files afterwards. Start
+the next round for the new day. The additional parameters used for the
+analysis of yesterday: B<--gzip> B<--sleeptime=0> B<--copy-config>
+
+=item *
+
+Capture and filter the output (STDIN and STDERR) of B<hades-analyzer.pl> and
+send it to the logging mechanism used for B<hades-analyzed.pl>.
+
+=item *
+
+Tries to act correctly on B<hades-analyzer.pl> problems. E.g. kills analyzers
+that are running too long and most likely ran into a dead lock.
+
+=back
+
+
+
+=head1 DOMAINS
+
+In order to run B<hades-analyzed.pl> you have to configure the Hades domains
+the daemon should care about. This is normally done via the configuration
file,
+but there is also a command line parameter available (see B<--domain> in
+L<OPTIONS> below).
+
+For every domain that should be handled by analyzed you should put one
+domain line in the configuration file. The value is a string representing a
+call to B<hades-analyzer.pl>. The first word in the line is the domain or the
+Hades configuration file for the domain (parameter B<--config> of
+B<hades-analyzer.pl>). The rest of the line is split on white space and used
as
+parameters for B<hades-analyzer.pl> as they are.
+
+There is no shell like quoting available at the moment. Parameters are
+directly passed to IPC::Run via start().
+
+You normally add at least the parameters B<--updatedb> and B<--map> to write
+meta data to the database and call B<hades-mkmap.pl> in order to generate the
+weather maps.
+
+The output (STDOUT and STDERR) of the executed B<hades-analyzer.pl> is parsed
+and filtered to some extent. Some messages are just thrown away, other are
+sent to the log with other loglevels. You can use B<--verbose> and B<--debug>
+in the domain options to log more information, but the parsing is NOT
+adjusted. You will e.g. not see the messages thrown away by the parsing and
+the additional output will also not be handled in any way, just passed on!
+
+Do NOT use parameters used by analyzed itself! At the moment the following
+parameters are used: B<--config>, B<--year>, B<--month>, B<--day>,
+B<--sleeptime>, B<--gzip>
+
+As default one domain is used that is configured by the file F<hades.conf> in
+the F<etc> subdirectory in the directory below the directory
+B<hades-analyzed.pl> is located. Of course, no additional parameters are
used.
+
+Examples for configuration file:
+
+ domain domain1 --updatedb --map
+ domain domain2 --updatedb --map --wwwdir=/tmp/save
+ domain domain2 --updatedb --map --current --status
+
+
+
+=head1 OPTIONS
+
+This is a full list of available command line options. Please keep in mind
+that this script does NOT provide the normal Hades command line options
+or configuration file options!
+Some options might even look familiar, although they are used slightly
+different!
+
+Nearly all options have a built in default that can be overwritten using
+command line arguments or variables in the configuration file.
+Arguments have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Read configuration file F<CONFIGFILE> for options.
+
+Default: F<PATH_TO_ANALYZED/../etc/analyzed.conf>
+
+
+=item B<--noconfig>
+
+Do not read any configuration file. The parameter B<--config> is also
ignored!
+
+Default: off
+
+
+=item B<--[no]detach>
+
+Detach from terminal, aka run in background (instead of foreground).
+Log messages will not be sent to F<STDERR>.
+
+Default: on
+
+Configuration file: B<detach>
+
+
+=item B<--logfile>[=F<LOGFILE>]
+
+Append messages to file F<LOGFILE>.
+
+Just use B<--logfile> without the optional value to enable logging to default
+log file F</var/log/hades-traceroute.log>.
+
+You can use this option together with B<--syslog>.
+Messages will then be written to both, log file and system log.
+
+Default: off
+
+Configuration file: B<logfile>
+
+
+=item B<--nologfile>
+
+Do not write to any log file. The parameter B<--logfile> is also ignored!
+
+Default: off
+
+Configuration file: use B<logfile>
+
+
+=item B<--[no]syslog>
+
+Whether messages should be written to system log.
+
+You can use this option together with B<--logfile>.
+Messages will then be written to both, log file and system log.
+
+Default: off
+
+Configuration file: B<syslog>
+
+
+=item B<--syslog-host>=I<HOST>
+
+Use I<HOST> as host to which system log messages are forwarded.
+
+If this option is set to a dns name or ip address, all system log messages
+are forwarded to the specified remote host.
+If set to the empty string ("") logging is done locally.
+
+Default: log locally
+
+Configuration file: B<syslog-host>
+
+
+=item B<--syslog-ident>=I<IDENT>
+
+The string I<IDENT> will be prepended to all messages in the system log.
+
+Default: I<hades-traceroute>
+
+Configuration file: B<syslog-ident>
+
+
+=item B<--syslog-facility>=I<FACILITY>
+
+Use I<FACILITY> as type of program for system logging.
+
+This string will be used as the system log facility for messages sent to
+the system log.
+
+See your C<syslog(3)> documentation for the facilities available on your
+system.
+Typical facilities are I<auth>, I<authpriv>, I<cron>, I<daemon>, I<kern>,
+I<local0> through I<local7>, I<mail>, I<news>, I<syslog>, I<user>, I<uucp>.
+
+Default: I<daemon>
+
+Configuration file: B<syslog-facility>
+
+
+=item B<--loglevel>=I<LOGLEVEL>
+
+Use I<LOGLEVEL> as log level used for logging to syslog and to the log files.
+
+This option is used for setting the verbosity of the running daemon.
+The log levels available are the log levels defined by Log::Dispatch.
+
+This is a list of values that should be accepted:
+ 0 = debug
+ 1 = info
+ 2 = notice
+ 3 = warning
+ 4 = err = error
+ 5 = crit = critical
+ 6 = alert
+ 7 = emerg = emergency
+
+Default: I<notice>
+
+Configuration file: B<loglevel>
+
+
+=item B<--verbose>
+
+Just a handy abbreviation for B<--loglevel>=I<info>.
+
+Default: not set, see B<--loglevel>
+
+Configuration file: use B<loglevel>=I<info>
+
+
+=item B<--pidfile>[=F<PIDFILE>]
+
+Use PIDFILE as name of pid file.
+The pid file contains the Process ID of the running oppd service.
+
+Just use B<--pidfile> without the optional value to use the default pid file
+F</var/run/hades-traceroute.pid>.
+
+Default: F</var/run/hades-traceroute.pid>
+
+Configuration file: B<pidfile>
+
+
+=item B<--nopidfile>
+
+Do not use a pid file. The parameter B<--pidfile> is also ignored!
+
+Default: off
+
+Configuration file: use B<pidfile>
+
+
+=item B<--analyzer>|B<-t>=F<ANALYZER>
+
+Use F<ANALYZER> as analyzer script to run for doing the "real" work.
+Can be an absolute or relative path. For relative paths the PATH environment
+variable is searched. For security reasons you normally avoid using relative
+paths.
+
+B<NOTE>: You cannot add command line options here! Only the path to the
binary
+(without white space)!
+
+Default: The script F<hades-analyzer.pl> residing in the same directory as
+F<hades-analyzed.pl> is used as default.
+
+Configuration file: B<analyzer>
+
+
+=item B<--domain>=I<DOMAIN-CONFIG>
+
+You can use one or more B<--domain> parameters to configure one or more
domains
+as you normally do in the configuration file. Domains listed in the
+configuration file are then ignored!
+
+Every I<DOMAIN-CONFIG> string has the same syntax as an entry in the
+configuration file (without the keyword B<domain>, of course). See L<DOMAINS>
+above.
+
+B<NOTE:> Don't forget to quote the white space for the shell!
+
+Default: F<PATH_TO_ANALYZED/../etc/hades.conf>
+
+Configuration file: B<domain>
+
+
+=item B<--timer-warn>=I<SECONDS>
+
+If an B<hades-analyzer.pl> for a domain is running longer than the specified
+time in seconds, a warning message will be created.
+
+Default: 3600 (one hour)
+
+Configuration file: B<timer-warn>
+
+
+=item B<--timer-kill>=I<SECONDS>
+
+If an B<hades-analyzer.pl> for a domain is running longer than the specified
+time in seconds, it will get killed by first sending SIGTERM and
+then SIGKILL.
+
+Default: 10800 (three hours)
+
+Configuration file: B<timer-kill>
+
+
+=item B<--polling-sleeptime>=I<SECONDS>
+
+This variable allows you to set a time in seconds that will tell
+B<hades-analyzed.pl> how long to sleep between polling every running
+B<hades-analyzer.pl>. This value should not be too high, because it will slow
+down B<hades-analyzed.pl> significantly especially when a lot of domains are
+configured. It will also lead to a significant delay till a finished
+B<hades-analyzer.pl> is started again.
+
+Default: 1
+
+Configuration file: B<polling-sleeptime>
+
+
+=item B<--max-polling-errors>=I<COUNT>
+
+If an B<hades-analyzer.pl> cannot be polled for new output on STDOUT and
+STDERR it is considered to hang. Instead of killing it at once, we keep on
+trying the specified number of times. These retries are done in normal
+polling order, so all other domains are polled, before the one that failed
+is tried again. Nevertheless the polling takes place quite frequently
+(depending also on B<--polling-sleeptime>, see above) and therefore
+B<max-polling-errors> does not need to be small to kill the hanging
+hades-analyzer.pl fast enough.
+
+Default: 10
+
+Configuration file: B<max-polling-errors>
+
+
+=item B<--retry-start-delay>=I<SECONDS>
+
+If executing an B<hades-analyzer.pl> fails, B<hades-analyzed.pl> will not try
+to start it again at once, but wait at least the specified time in seconds.
+This prevents too many consecutive tries and the lots of error messages you
+will get. Normally an error condition is not going away fast.
+
+Default: 600 (ten minutes)
+
+Example:
+retry-start-delay 300
+
+
+=item B<--day-begin-threshold>=I<SECONDS>
+
+It is not necessary that all system clocks involved in Hades measurements are
+hardware synchronised. This makes a proper day change difficult. Therefore
+B<hades-analyzed.pl> has the possibility to set a threshold in seconds that
+delays the transition to a new day (if necessary) so that the final analysis
+of yesterday is started not earlier than I<SECONDS> seconds after
+midnight.
+
+Default: 60 (one minute)
+
+Configuration file: B<day-begin-threshold>
+
+
+=back
+
+
+
+=head1 SIGNALS
+
+The hades-analyzed can be controlled by using various signals.
+
+
+=over
+
+
+=item SIGHUP
+
+SIGHUP leads to restarting all analyzers and thus rereading their
configuration
+files. Daemon is NOT reconfigured (rereading of main configuration file).
+
+
+=item SIGINT and SIGTERM
+
+Daemon terminates immediately. A SIGKILL is sent to the child processes
+shortly after giving them the chance to exit properly by sending them a
+SIGTERM.
+
+
+=item SIGUSR1
+
+Daemon terminates gracefully by sending all child processes a SIGUSR1 and
+waiting a specified time (at the moment 30 seconds) before sending them a
+SIGKILL.
+
+=for comment TODO There might be an option for $gracetime in the future.
+
+
+=item SIGUSR2
+
+Daemon terminates gracefully by waiting for all analyzers to finish.
+The daemon might therefore wait forever and never return if there is
something
+wrong with one of the analyzers!
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+Start with a different configuration file:
+
+ $ hades-analyzed.pl --config=/usr/local/etc/hades-analyzed.conf
+
+Debug the daemon:
+
+ $ hades-analyzed.pl --nodetach \
+ --loglevel=debug --nologfile --nopidfile --nosyslog
+
+Use other domain configuration and not the one from configuration file:
+
+ $ hades-analyzed.pl --domain="domain1 --updatedb --map" \
+ --domain="domain2 --updatedb --map --wwwdir=/tmp/save-tmp" \
+ --domain="domain2 --updatedb --map --current"
+
+
+
+=head1 SEE ALSO
+
+hades-analyzed.conf
+=for comment TODO This one has to be written...
+
+
+
+=head1 AUTHORS
+
+DFN Labor Erlangen,
Property changes on: trunk/build/HADES/bin/hades-analyzed.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-analyzer.pl
===================================================================
--- trunk/build/HADES/bin/hades-analyzer.pl (rev
0)
+++ trunk/build/HADES/bin/hades-analyzer.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,766 @@
+#!/usr/bin/perl
+
+#TODO
+# - traceroute support is a hack at the moment, because of problems in
+# Hades::Data::Finder->from_file
+
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use English;
+#use Statistics::Descriptive;
+use Storable qw(nstore);
+use DateTime;
+use DateTime::Format::HTTP;
+use Pod::Usage;
+use File::Basename;
+use File::Copy;
+use YAML;
+use POSIX;
+use IPC::Run qw(run timeout);
+
+use Hades;
+use Hades::DB;
+use Hades::Data::Finder;
+use Hades::Data::IPPM_Raw;
+use Hades::Data::IPPM_Aggregated;
+
# Register the command line options specific to hades-analyzer.pl with their
# defaults. NOTE(review): create_config() presumably comes from one of the
# Hades modules loaded above and also parses @ARGV into %config - verify.
create_config(
    "new|n!" => 0,
    "updatedb!" => 0,
    "writedata!" => 1,
    "status!" => 0, #TODO more or less a hack! Normally only from analyzed!
    "current!" => 0, #TODO more or less a hack! Normally only from analyzed!
    "map!" => 0,
    "dry-run!" => 0,
    "copy-config!" => 0,
    "year=i" => undef,
    "month=i" => undef,
    "day=i" => undef,
    "today" => undef,
    "yesterday" => undef,
    "gzip!" => 0,
) or pod2usage(2);
+
# Do not always use the config hash, instead set useful variables
my $configfile = $config{configfile};
my $domain = $config{config};
my $verbose = $config{verbose};
my $debug = $config{debug};
#my $new = $config{new};
my $new = 1; # FIXME implement opposite of --new and reenable it here...
my $updatedb = $config{updatedb};
my $writedata = $config{writedata};
my $bindir = $config{bindir};
my $datadir = $config{datadir};
my $wwwdir = $config{wwwdir};
my $gzip = $config{gzip};
my $current = $config{current};
my $map = $config{map};
my $copy_config = $config{copy_config};
my $status = $config{status};
$status = 0 unless $domain eq "geant"; # Just to be sure ...
my $status_matrix;
if ($status) {
    # BUGFIX: "eval require Hades::Status::Toby" parses as
    # eval(require(...)), i.e. the require ran UNPROTECTED and a load
    # failure killed the script instead of being caught; $@ was then checked
    # on the meaningless string-eval of require's return value. A block eval
    # really traps the load error.
    eval { require Hades::Status::Toby; };
    if ($@) {
        warn "Cannot load status module: $@";
        $status = 0;
    }
}

if ($config{dry_run}) {
    # A dry run must neither touch the database nor write data files.
    $updatedb = 0;
    $writedata = 0;
}
+
# Timeouts for copying the measurement data from the remote boxes:
my $timeout_rsync = 60; # I/O timeout for rsync (see man rsync -> --timeout)
my $timeout_ssh = 30; # SSH ConnectionTimeout (see man ssh_config)
my $timeout_run = 300; # Timeout for the complete rsync/ssh execution. Since
                       # rsync and ssh have their own timeouts it should be
                       # ok to use a higher value.

#system "echo $domain `date '+%FT%X'` >> /tmp/analyzer-ttt.log";

my ($year,$mon,$day) = ($config{year}, $config{month}, $config{day});

# Everything is UTC below!
my $date;
if ($config{today}) {
    $date = DateTime->now;
} elsif ($config{yesterday}) {
    $date = DateTime->now->subtract(days => 1);
} elsif ($year && $mon && $day) {
    # Handle --day=2 and other strange things the generic way:
    $date = DateTime->new(year => $year, month => $mon, day => $day);
}
if ($date) {
    # Normalise to zero-padded strings (e.g. month "02") for paths below.
    ($year,$mon,$day) =
        ($date->strftime("%Y"), $date->strftime("%m"), $date->strftime("%d"));
}

my %hosts = get_hosts();
my %interfaces = get_interfaces();
my %routes = get_routes();

# BUGFIX: "my $db = Hades::DB->new() if $updatedb;" was a conditional
# my-declaration, whose behaviour is explicitly undefined (see perlsyn).
# Declare unconditionally, assign conditionally.
my $db;
$db = Hades::DB->new() if $updatedb;
+
# Initialise the status matrix (only active for the "geant" domain, see
# $status above). The analysed day is passed on when a date was determined.
if ($status) {
    my %p = ( wwwdir => $wwwdir );
    if ($date) {
        $p{day} = $date;
    } else {
        warn "Status analysis enabled without a date -> Assuming today\n";
        # today is set by Hades::Status::Toby
    }
    eval {
        $status_matrix = Hades::Status::Toby->new;
        $status_matrix->init(%p);
    };
    print "Error in status analysis: $@" if $@;
}
+
# Main dispatch: with a complete date analyse all (or the listed) hosts;
# without a date treat the remaining arguments as data files to analyse
# directly; otherwise show usage.
if ($year && $mon && $day) {
    if ($copy_config) {
        #TODO Put perhaps some error handling and debug output code here ...
        my $wwwdestdir = "$wwwdir/$year/$mon/$day";
        mkDir($wwwdestdir);
        copy("$configfile", "$wwwdestdir/hades.conf");
    }
    # Remaining command line arguments restrict the run to these hosts.
    my %hosts_todo = ();
    if (@ARGV) {
        while (my $host = shift @ARGV) {
            $hosts_todo{$host} = 1;
        }
    }
    analyze_hosts(%hosts_todo);
} elsif (@ARGV) {
    while (my $file = shift @ARGV) {
        analyze_file($file);
    }
    # Don't write status for every file. Just after everything is finished:
    if ($status) {
        eval {
            $status_matrix->store_file();
        };
        print "Error writing status file: $@\n" if $@;
    }
} else {
    pod2usage(2);
}


exit 0;
+
+
+## END OF MAIN ##
+
+
END {
    # Close the database connection on every exit path. Guard against an
    # undefined handle: we may die before Hades::DB->new() was reached, and
    # a crash inside END would mask the real exit code.
    $db->close_db() if $updatedb && defined $db;
}
+
+
# Analyse all (or the given subset of) configured hosts for the selected day:
# unzip already archived data files, rsync new data from every host, analyse
# the copied files and compress the data files again afterwards.
sub analyze_hosts {
#   use Net::SFTP;
    use Socket;
    use File::stat;

    my %hosts_todo = @_; # "List" of hosts to be done. Empty -> all

    my $daydir = "$datadir/$year/$mon/$day";
    # BUGFIX: opendir was unchecked. The directory may legitimately not
    # exist yet on the first run for this day - run_rsync() creates it
    # below - so just continue with an empty file list in that case.
    my @datafiles = ();
    if (opendir my $dh, $daydir) {
        @datafiles = readdir $dh;
        closedir $dh;
    }
    my @zipped = ();
    foreach my $datafile (@datafiles) {
        if ($datafile =~ m/\.gz$/) {
            print "Unzipping $datafile\n" if $verbose;
            # List form of system: no shell involved, robust against
            # metacharacters in configured paths.
            system "gzip", "-d", "$daydir/$datafile";
            $datafile =~ s/\.gz$//;
            push @zipped, $datafile;
        }
    }
    foreach my $host (sort keys %hosts) {
        # Sorting helps a little bit with concurrent analyze_hosts, that
        # really should not occur... ;-)

        next if keys %hosts_todo && !$hosts_todo{$host};

        my @files = run_rsync($host);
        if (@files) {
            # We have files to process!
            foreach my $file (@files) {
                analyze_file($file);
            }
            if ($map) {
                # make map(s)
                #TODO use IPC::Run !!!
                system "$bindir/hades-mkmap.pl", "--config=$configfile",
                    ($verbose ? ("--verbose") : ());
            }
            if ($status) {
                eval {
                    $status_matrix->store_file();
                };
                print "Error writing status file: $@" if $@;
            }
        }

        print "Sleeptime: $config{sleeptime}\n" if $verbose;
        sleep $config{sleeptime} if $config{sleeptime} > 0;

    }
    if ($gzip) {
        # Explicit loop variable instead of relying on glob's implicit $_
        # assignment in the while condition.
        while (defined(my $datfile = glob "$daydir/*.dat")) {
            system "gzip", $datfile;
        }
    } else {
        # gzip only the files that were previously gunziped
        foreach my $datafile (@zipped) {
            print "Zipping $datafile\n" if $verbose;
            system "gzip", "$daydir/$datafile";
        }
    }
}
+
+
# Copy the measurement data files of one host for the selected day via rsync
# over ssh. Returns the list of copied/updated data files belonging to a
# configured route; an empty list on error or when nothing changed.
sub run_rsync {
    my $host = shift;

    my %host = %{$hosts{$host}};
    my $ip = ssh_ip($hosts{$host});
    return unless defined $ip;
    my $hostname = name($ip) || "NO DNS";
    print "Copying from $host (".$hostname.", $ip)\n" if $verbose;

    # Build the remote-shell command line rsync will use (--rsh).
    my $rsh_command = $config{rsync_rsh};
    $rsh_command .= " -c blowfish -o ConnectTimeout=$timeout_ssh";
    $rsh_command .= " -l " . $host{ssh_args}->{user};
    foreach (@{$host{ssh_args}->{identity_files}}) {
        $rsh_command .= " -i $_";
    }

    mkDir("$datadir/$year/$mon/$day");

    my @rsync_command = (
        $config{rsync_path},
        "--verbose", "--archive", "--timeout=$timeout_rsync", "--no-whole-file",
        "--rsh=$rsh_command",
        $ip . ":" . $host{dat_path} . "/$year/$mon/$day/", # from
        "$datadir/$year/$mon/$day" # to
    );
    print "Executing: " . join(' ', @rsync_command) . "\n" if $debug;
    my ($out,$err);
    eval {
        unless (
            run(
                \@rsync_command, \undef, \$out, \$err,
                timeout(
                    $timeout_run, exception => "Running longer than $timeout_run seconds"
                )
            )
        ) {
            print "rsync $host/$hostname/$ip: failed (exit value $?)\n";
        }
    };
    if ($@) {
        # Timeout or another IPC::Run exception.
        foreach (split "\n", $@) { #TODO split on \n ok?
            print "rsync $host/$hostname/$ip: $_\n";
        }
    }
    if ($err) {
        foreach (split "\n", $err) { #TODO split on \n ok?
            print "rsync $host/$hostname/$ip: err: $_\n";
        }
    }
    return unless $out;
    # Nothing more to process and therefore no files to analyse
    my @files = ();
    # rsync --verbose lists one file name per line; pick out data files
    # (host1.host2.mid[.sub].dat) that belong to a configured route.
    foreach my $file (split "\n", $out) { #TODO split on \n ok?
        if ($file =~ m/([\w\d_-]+)\.([\w\d_-]+)\.([\w\d]+)(\.[\w\d]+)?\.dat$/) {
            my $host1 = $1;
            my $host2 = $2;
            if (exists $routes{$host1}{$host2} and ! ($file =~ m/bwctl/)) {
                #TODO there should be no exception for the bwctl data...
                print "rsync $host/$hostname/$ip: copied/updated file $file\n"
                    if $debug;
                push @files, "$datadir/$year/$mon/$day/$file";
            }
        } elsif ($debug) {
            # Normally not really interesting rsync output...
            print "rsync $host/$hostname/$ip: out: $file\n";
        }
    }
    return @files;
}
+
# Analyze a single raw data file. The measurement type is taken from the
# file name suffix:
#   *.tracert.dat[.gz]  - traceroute data (handled inline, see TODO below)
#   *.bwctl / *.pathload - currently ignored
#   *.<mid>.dat[.gz]    - IPPM data, handled via the Hades::Data modules
# Prints errors/warnings to stdout and returns early on any failure.
sub analyze_file {
    # Always set the start time; "my $x = EXPR if COND" is undefined
    # behavior in Perl, and $starttime was used below even when !$verbose.
    my $starttime = time;
    my $file = shift @_;

    print "Processing $file ... \n" if $verbose;

    #TODO
    # The following code is ugly!
    # Because Hades::Data::Finder->from_file is far from being usable here
    # directly, we have to preprocess before using it.
    # Some reasons:
    # - Loading every file although nothing will be done (e.g. BWCTL)
    # - No detection of Traceroute<->Traceroute_Raw
    my $basename = basename($file);
    unless ($basename =~ m/(.+)\.([\w\d]+)\.dat(\.gz)?$/) {
        warn "WARNING: Invalid file name: $basename\n";
        warn "WARNING: SKIPPING $basename\n";
        return;
    }
    my $fileprefix = $1;
    my $mtype = $2;
    my $gzip = defined($3) ? 1 : 0;

    # What kind of file do we have?
    if ($mtype eq "tracert") {
        #TODO handle this with data modules! Not trivial! See also above!
        my ($header, @rawdata);
        if ($gzip) {
            # List-form pipe open: the file name is passed as a single
            # argument and never interpreted by a shell.
            open(my $datafh, "-|", "/bin/gzip", "-cd", "-f", $file)
                or print("Could not unzip/open \"$file\": $!\n"), return;
            #TODO Error handling better
            my @dummy = <$datafh>;
            close $datafh;
            ($header, @rawdata) = YAML::Load(join "", @dummy)
                or print("$!"), return; #TODO Error handling
        } else {
            ($header, @rawdata) = YAML::LoadFile($file)
                or print("$!"), return; #TODO Error handling
        }
        my $dirprefix = "$wwwdir/$header->{date}";
        mkDir($dirprefix);
        my $outfile = "$dirprefix/$fileprefix.tracert.dat";
        my ($timeline, $tracerts) = process_tracertdata(@rawdata);
        Hades::Data::Finder->zip_nstore([$header, $timeline, $tracerts], $outfile)
            or print("Error: Couldn't write data to $outfile\n"), return;
    } elsif ($mtype eq "bwctl") {
        #TODO Do something here... Use data modules...
    } elsif ($mtype eq "pathload") {
        #TODO Do something here... Use data modules...
    } elsif ($mtype =~ /^\d+$/) {
        # Handle IPPM data file (with new data modules)
        my $finder = Hades::Data::Finder->new();
        my $data_raw = $finder->from_file($file);
        unless ($data_raw) {
            print "Error opening data file '$file'";
            if (my $warnings = $finder->{warnings}->get_string) {
                print ":\n$warnings";
            } else {
                print "\n";
            }
            return;
        }
        if (my $warnings = $data_raw->{warnings}->get_string) {
            print "Warnings occurred opening data file '$file':\n$warnings";
        }
        unless (UNIVERSAL::isa($data_raw, 'Hades::Data::IPPM_Raw')) {
            print "Data file '$file' is not 'Hades::Data::IPPM_Raw', but '" .
                ref($data_raw) . "', skipping\n";
            return;
        }
        # Convert raw data to the aggregated representation.
        my $data_agg = Hades::Data::IPPM_Aggregated->from_raw($data_raw);
        unless ($data_agg && $data_agg->extract_data) {
            print "Error converting raw data";
            if ($data_agg && (my $warnings = $data_agg->{warnings}->get_string)) {
                print ":\n$warnings";
            } else {
                print "\n";
            }
            return;
        }
        if (my $warnings = $data_agg->{warnings}->get_string) {
            print "Warnings occurred analyzing data file '$file':\n$warnings";
        }
        write_ippm_data($data_agg, $fileprefix, $mtype) or return;
        if ($status) {
            eval {
                $status_matrix->analyze($data_agg);
            };
            print "Error in status analysis: $@" if $@;
        }
        print "done\n" if $verbose;
    } else {
        print "WARNING: Unknown measurement type: $mtype\n";
    }
    if ($verbose) {
        # Break the elapsed time down into h/m/s with integer arithmetic
        # instead of the former repeated-subtraction loops.
        my $timetaken = time - $starttime;
        my $hr = int($timetaken / 3600);
        my $mn = int(($timetaken % 3600) / 60);
        my $sc = $timetaken % 60;
        print "Done! (processing time ";
        if ($hr > 0) {
            print "$hr h, $mn m and $sc s)\n";
        } elsif ($mn > 0) {
            print "$mn m and $sc s)\n";
        } else {
            print "$sc s)\n";
        }
    }
}
+
+
+# Convert a list of raw traceroute records (as read from a *.tracert.dat
+# file) into two compact structures:
+#   @timeline - chronological entries { time, ref } where "ref" indexes
+#               into @tracerts, is -1 for errors (then "error" carries the
+#               message), and undef for the closing end-of-day entry
+#   @tracerts - deduplicated list of routes; each route is an array of
+#               { name, ip } hops
+# Returns (\@timeline, \@tracerts), or nothing when no usable timestamp
+# was found (e.g. a temporarily incomplete data file).
+sub process_tracertdata {
+
+ my (@timeline,@tracerts);
+ my $time;
+ foreach my $rawdata (@_) {
+ my @data;
+ if ($rawdata->[0] =~ /^(\S+) (\S+)$/) {
+ # Convert old data format: "YYYY-MM-DD HH:MM:SS" in local time
+ # (Europe/Berlin) is translated to epoch seconds.
+ #TODO remove sometime in the future?
+ my ($y,$mo,$d) = split "-", $1;
+ my ($h,$mi,$s) = split ":", $2;
+ $time = DateTime->new(
+ year=>$y, month=>$mo, day=>$d, hour=>$h, minute=>$mi, second=>$s,
+ time_zone => "Europe/Berlin"
+ )->epoch;
+ } else {
+ # New data format already stores the epoch timestamp directly.
+ $time = $rawdata->[0];
+ }
+ unless (exists $rawdata->[1]->[0] && defined $rawdata->[1]->[0]) {
+ # There is an empty list where you normally expect data, error
+ # messages, or marks. This should not happen with recent
+ # hades-traceroute daemons, but you can find it in old data files.
+ # So the following code prevents Perl warnings about undef values.
+ $rawdata->[1] = ["ERROR","No valid output from traceroute"];
+ }
+ next if $rawdata->[1]->[0] =~ /^MARK/;
+ # A mark was found -> $time & $date already set -> skip entry
+ if ( $rawdata->[1]->[0] =~ /^ERROR/ ) {
+ # Error during traceroute -> create special entry
+ push @timeline, {
+ "time" => $time, "ref" => -1 ,
+ "error" => $rawdata->[1]->[1] # = error message
+ };
+ next;
+ }
+ my $tracertref = -1; # number referring from an entry in @timeline
+ # to the entry in @tracerts
+ foreach (@{$rawdata->[1]}) {
+ # Each hop line has the form "<hop>:<name>:<ip>".
+ next unless /^(\d+):([^:]+):(.+)$/;
+ #TODO What if it fails? Error message?
+ # Keep in mind: We don't have $hop!
+ # Should normally NOT happen! Was running for years with the
+ # simple split ':' solution without using "next" on errors!
+ my ($hop,$name,$ip) = ($1,$2,$3);
+ $data[$hop-1] = { name => $name, ip => $ip };
+ }
+ # Look if this route already occurred:
+ for (my $i=0 ; $i<=$#tracerts ; $i++) {
+ if (
 equal_tracerts(\@data,
 $tracerts[$i]) ) {
+ $tracertref = $i;
+ last;
+ }
+ }
+ if ($tracertref == -1) {
+ # New route
+ push @tracerts,
 \@data;
+ $tracertref = $#tracerts;
+ }
+ push @timeline, { "time" => $time, "ref" => $tracertref };
+ }
+ # The next steps are only possible if the epoch time is available.
+ # This prevents error messages sometimes seen when run for the
+ # current day. Most likely this is related to a temporarily incomplete
+ # data file.
+ return unless defined $time;
+ if ( $time % 86400 >= 82800 ) { # 86400 = 24*60*60 ; 82800 = 23*60*60
+ # $time in the last hour of the day
+ # => Last data or MARK are most likely the "last of the day"
+ # -> round the closing timestamp up to the next day boundary
+ $time = floor($time / 86400) * 86400 + 86400;
+ }
+ # Closing entry: "ref" undef marks the end of the day's timeline.
+ push @timeline,
+ { "time" => $time, "ref" => undef };
+ return
 \@timeline,
 \@tracerts;
+}
+
+
+#TODO I deserve my own module! See also inline TODO
+# Write aggregated IPPM measurement data to the www output tree.
+#
+# Parameters:
+#   $data_obj   - a Hades::Data::IPPM_Aggregated object
+#   $fileprefix - "<sender>.<receiver>" part of the output file names
+#   $mid        - measurement id (used in file names and as DB key)
+#
+# When $writedata is set, writes <prefix>.<mid>.info.dat (meta data and
+# statistics via nstore) and <prefix>.<mid>.qos_ai.dat (zipped data) into
+# "$wwwdir/YYYY/MM/DD", plus "current"/"current-3h" snapshots if $current
+# is set. When $updatedb is set, the meta data is also fed to the DB.
+# Returns 1 on success, nothing on any error (which is already reported).
+sub write_ippm_data {
+ my ($data_obj, $fileprefix, $mid) = @_;
+
+ my $data = $data_obj->get_data;
+ if (ref($data) ne "ARRAY" || $#$data < 0) {
+ print "No data!\n" if $verbose; # Not really useful for cron job!
+ return;
+ }
+
+ #TODO The following code for output directory (fdate) detection
+ # is not really sophisticated! It would be much better to have it handled
+ # by a correct framework (Finder<->Reader<->Writer) somehow ...
+ my $date = DateTime->from_epoch(epoch => $data_obj->{start_time});
+ my $fyear = $date->year;
+ my $fmonth = $date->strftime('%m');
+ my $fday = $date->strftime('%d');
+
+ my $infofile = "$fileprefix.$mid.info.dat";
+ my $datafile = "$fileprefix.$mid.qos_ai.dat";
+
+ my $dirprefix = "$wwwdir/$fyear/$fmonth/$fday";
+ mkDir($dirprefix);
+
+ my ($cur_infofile, $cur_datafile, $cur3h_infofile, $cur3h_datafile);
+ if ($current) {
+ mkDir("$wwwdir/current");
+ #$cur_infofile = "$wwwdir/current/$infofile";
+ $cur_datafile = "$wwwdir/current/$datafile";
+ mkDir("$wwwdir/current-3h");
+ #$cur3h_infofile = "$wwwdir/current-3h/$infofile";
+ $cur3h_datafile = "$wwwdir/current-3h/$datafile";
+ }
+
+ $infofile = "$dirprefix/$infofile";
+ $datafile = "$dirprefix/$datafile";
+
+ #TODO Instead of doing file storing "by hand" and "misusing" file2db, there
+ # should be dedicated objects/methods. See also TODOs on the possible
+ # Hades::Data::Finder rework and the TODO about data2db in Hades::DB.
+
+ my $data_info = {
+ version => 1,
+ sender_name => $data_obj->get_sender,
+ receiver_name => $data_obj->get_receiver,
+ %{$data_obj->get_meta},
+ %{$data_obj->get_statistics},
+ };
+
+ if ($writedata) {
+ (nstore($data_info, $infofile))
+ or print("Error: Couldn't write data to $infofile\n"), return;
+ #DumpFile($infofile, $data_info); # TODO handle parse errors (catch die...)
+ (Hades::Data::Finder->zip_nstore($data, $datafile))
+ or print("Error: Couldn't write data to $datafile\n"), return;
+ if ($current) {
+ # Snapshots of the last hour / last three hours for the web frontend.
+ #(nstore($data_info, $cur_infofile))
+ # or print("Error: Couldn't write data to $cur_infofile\n"), return;
+ (nstore([$data_obj->get_from(time() - 3600)], $cur_datafile))
+ or print("Error: Couldn't write data to $cur_datafile\n"), return;
+ #(nstore($data_info, $cur3h_infofile))
+ # or print("Error: Couldn't write data to $cur3h_infofile\n"), return;
+ (nstore([$data_obj->get_from(time() - 10800)], $cur3h_datafile))
+ or print("Error: Couldn't write data to $cur3h_datafile\n"), return;
+ }
+ }
+ if ($updatedb) {
+ $db->file2db( info => $data_info,
+ year => $fyear, month => $fmonth, day => $fday, mid => $mid
+ ) or print("Error writing to database!\n"), return;
+ }
+ return 1;
+}
+
+
+
+__END__
+
+
+
+=head1 NAME
+
+B<hades-analyzer.pl> - TODO
+
+=head1 SYNOPSIS
+
+B<hades-analyzer.pl> S<[B<--help>]> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]> S<[--[no]debug|-d]>
+ S<[B<--[no]new>|B<-n>]>
+ S<F<raw data files ...>>
+
+B<hades-analyzer.pl> S<[B<--help>]> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]> S<[--[no]debug|-d]>
+ S<[B<--[no]new>|B<-n>]>
+ S<[B<--sleeptime>=I<SEC>]>
+ S<B<--today>> | S<B<--yesterday>> | S<B<--day>=I<DD>> S<B<--month>=I<MM>>
S<B<--year>=I<YYYY>>
+ S<F<hosts to analyze ...>>
+
+
+
+=head1 DESCRIPTION
+
+TODO
+
+B<hades-analyzer.pl> has two modes:
+
+=over
+
+=item 1.
+
+Analyze data file(s) taken from command line.
+
+=item 2.
+
+Get the data files from measurement boxes and analyze them afterwards.
+
+=back
+
+In order to enable the second mode, you have to specify S<B<--today>>,
+S<B<--yesterday>>, or S<B<--day>>, S<B<--month>>, and S<B<--year>>.
+Further non-option parameters
+on the command line are then treated as box names. Data will only be fetched
+and analyzed from these boxes. If no non-option parameters exist, data from
+all boxes will be fetched and analyzed.
+
+B<Important:> File boundaries are in UTC! Therefore there is no flexible
+S<B<--date>> parameter. It would only introduce time zone problems. So the
+date you specify using S<B<--day>>, S<B<--month>>, and S<B<--year>> is
+implicitly UTC.
+
+
+=head1 OPTIONS
+
+Nearly all options have a built in default, that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--today>
+
+Copy and analyze data of today. Overwrites S<B<--yesterday>>, S<B<--day>>,
+S<B<--month>>, and S<B<--year>>.
+
+Configuration file: n.a.
+
+Default: not set
+
+
+=item B<--yesterday>
+
+Copy and analyze data of yesterday. Overwrites S<B<--day>>,
+S<B<--month>>, and S<B<--year>>.
+You normally use at least S<B<--gzip>> when using this option. It is not used
+automatically! You have to use it explicitly.
+
+Configuration file: n.a.
+
+Default: not set
+
+
+=item S<B<--day>=I<DD>> S<B<--month>=I<MM>> S<B<--year>=I<YYYY>>
+
+Copy and analyze data of DD.MM.YYYY.
+
+Configuration file: n.a.
+
+Default: not set
+
+
+=item B<--[no]new>|B<-n>
+
+Already existing analyzer results will be discarded.
+
+Configuration file: n.a.
+
+Default: disabled
+
+
+=item B<--[no]debug>|B<-d>
+
+Print additional information.
+
+Configuration file: n.a.
+
+Default: disabled
+
+
+=item B<--wwwdir>=F<PATH>
+
+Use F<PATH> as output directory for generated data.
+
+Configuration file: C<$wwwdir>
+
+Default: F<[basedir]/www>
+
+
+=item B<--sleeptime>=I<SEC>
+
+Sleep for I<SEC> seconds after each host.
+
+Configuration file: C<$sleeptime>
+
+Default: 0
+
+
+=item B<--[no]updatedb>
+
+Insert the extracted meta data into the meta data database. This is very
+important! But since it needs only to be done once a day, it is normally
+enough that B<hades-analyzed> is doing it. But if you are reanalysing whole
+days or if you are not using B<hades-analyzed> at all, you should really care
+about this option!
+
+Configuration file: n.a.
+
+Default: 0
+
+
+=item B<--[no]copy-config>
+
+Copy the (current) configuration file to the data directory. This is normally
+only useful when executed by B<hades-analyzed>. The option is ignored, when
+data files are set directly via command line options.
+
+Configuration file: n.a.
+
+Default: 0
+
+
+=item TODO
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+ $ hades-analyzer.pl --year=2003 --month=04 --day=23
+
+ $ hades-analyzer.pl --today
+
+
+
Property changes on: trunk/build/HADES/bin/hades-analyzer.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-cfg-create.pl
===================================================================
--- trunk/build/HADES/bin/hades-cfg-create.pl (rev
0)
+++ trunk/build/HADES/bin/hades-cfg-create.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,1159 @@
+#!/usr/bin/perl
+
+# See embedded POD below for further information
+
+# IMPORTANT: The scheduling algorithm ensures that there are no concurrent
+# Hades measurements on a box. Indeed it is most likely not a
+# problem doing measurements at the same time on different
+# interfaces of one box. But this is unsure and the interface
+# detection based on the ip address even fails if there are
multiple
+# addresses for the same interface or the IPv4 and IPv6 addresses
of
+# the same interface are both used for doing measurements (they
+# can obviously not be matched easily!).
+# By using the ssh ip, it is much easier, because you just have to
+# make sure that it is the same in all domains.
+# Nevertheless this can be changed to allow one
measurement
+# per interface at the same time.
+# See the variable $host_based_scheduling and the comments below.
+# There are also two different ways of port allocation. See the
+# variable $fixed_source_ports and the comments below.
+
+# TODO
+# - Create a new mode --check for usage with hades-check-cron.sh
+# At the moment --reload is doing something somewhat wrong for
traceroute...
+# - host_based_scheduling as configuration file variable?
+# - When renaming or removing Boxes from config file, there are orphaned
rsync
+# directories left behind! Clean them...
+# - Problem: ssh and interface ips are implicitly compared using string
+# compare! This is used for port and interface calculation (hash keys).
+# This is problematic for IPv6 !!!
+# - The hard coded $tracertopts are not really the best way
+# => Think about it! But be careful ;-)
+# - Perhaps completely different with Bit::Vector ??
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+#use Storable;
+use Pod::Usage;
+use File::Path;
+use File::Basename;
+use IPC::Run qw(run timeout timer);
+use Socket;
+
+use Hades;
+
+create_config(
+ 'dry-run' => 0,
+ 'update|u!' => 1,
+ 'delete|d' => 0,
+ 'sync|s|copy|c' => 0,
+ 'reload|r' => 0,
+ 'all' => 0,
+ 'sshuser=s' => undef,
+ 'sshinteractive!' => 1,
+ 'sshmode=s' => "ssh", # "perl" or "ssh"
+ 'plot:s' => undef,
+ 'plot-all!' => 0,
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $verbose = $config{verbose};
+my $debug = $config{debug};
+my $cfgsdir = $config{cfgsdir};
+my @codomains = @{$config{codomains}};
+my $portbase = $config{portbase};
+my $portmax = $config{portmax};
+
+my $dryrun = $config{dry_run};
+my $update = $config{update};
+my $delete = $config{delete};
+my $sync = $config{sync};
+my $reload = $config{reload};
+my $domain = $config{domain};
+if ($config{all}) {
+ $delete = $reload = $sync = 1;
+}
+my %active_conf;
+
+my %ssh_args = ();
+$ssh_args{user} = $config{user} if $config{sshuser};
+$ssh_args{interactive} = "1" if $config{sshinteractive};
+
+my $sshcommand = "ssh ";
+
+unless ($config{sshmode} eq "perl" || $config{sshmode} eq "ssh") {
+ warn "Unknown ssh mode: $config{sshmode}\nExpected: perl|ssh\n\n";
+ pod2usage(2);
+}
+
+my %hosts = get_hosts();
+my %interfaces = get_interfaces();
+my %routes = get_routes();
+
+# There are two different ways of doing resource protection/allocation. You
can
+# do scheduling and port allocation based on interfaces or hosts. Port
+# allocation heavily depends on the sender/receiver software, especially the
+# receiver. Is the receiver binding on the port for one interface or for all?
+# For scheduling it would, of course, be the best to have only exactly one
+# measurement running at the same time on one box. But this soon leads to a
+# lack of available intervals with boxes having more than one measurement
+# interface! The alternative is to only make sure that there is only one
+# measurement per interface. This should be no problem especially with modern
+# (multi core) PC architecture. But this is also problematic! See comment at
+# the beginning of this file.
+# By setting $host_based_scheduling to false/0 you can disable host based
+# scheduling and enable interface based scheduling if you REALLY want...
+my $host_based_scheduling = 1;
+
+# For the current SARIMI version only exactly one fixed destination port is
+# necessary for every measurement on each host. The source port is, as usual,
+# arbitrary. But older versions of SARIMI used exactly *one* fixed port
number
+# as source and destination port for every measurement on sender and
receiver.
+# Of course this has an influence on the port evaluation done in this script.
+# At the moment we normally still use the old way of allocating destination
AND
+# source port, because you normally have enough ports available and this way
+# we are prepared for everything. If you are running out of ports, you can
+# set $fixed_source_ports to 0 to make a lot more ports available.
+my $fixed_source_ports = 1;
+
+# Build ssh ip -> host id hash
+my %sship2hostid = ();
+while (my ($hostid, $host_ref)= each %hosts) {
+ if (defined (my $sship = ssh_ip($host_ref))) {
+ $sship2hostid{$sship} = $hostid;
+ }
+}
+# Build interface ip -> interface id hash
+# Only needed for interface based scheduling that is normally not enabled!
+my %ifip2ifid = ();
+unless ($host_based_scheduling) {
+ while (my ($ifid, $if_ref)= each %interfaces) {
+ if (defined (my $ifip = $if_ref->{ip})) {
+ $ifip2ifid{$ifip} = $ifid;
+ }
+ }
+}
+
+my %used_ports = ();
+my $interval_common = 1;
+ # The "great interval": The lcm of all intervals. This way all intervals
+ # of all measurements should "fit"
+my %configfiles = ();
+ # All configfiles noted here are ok, others are deleted!!
+my %cfgfiles_by_host = ();
+ # For rsync we need a list of files to sync.
+my %intervals = ();
+ # Allocated intervals per resource (as key)
+
+# All variables that can be found in a configuration file should be
+# declared here and should be mapped to the entry in the info hash.
+# At some places in write_configfile() and read_configfile() you still have
+# to edit lists to make sure the variables are really read/checked/written!!
+my %cfg_vars = (
+ #cfgfile info hash name
+ domain => { name => "domain", type => "string" },
+ datadir => { name => "dat_path", type => "string" },
+ pidpath => { name => "pid_path", type => "string" },
+ datafile => { name => "datafile", type => "string" },
+ senderip => { name => "senderip", type => "string" },
+ receiverip => { name => "receiverip", type => "string" },
+ sendername => { name => "sender", type => "string" },
+ receivername => { name => "receiver", type => "string" },
+ mid => { name => "mid", type => "int" },
+ port => { name => "port", type => "int" },
+ packetsize => { name => "packetsize", type => "int" },
+ precedence => { name => "precedence", type => "string" },
+ interval => { name => "interval", type => "int" },
+ packetgroupsize => { name => "groupsize", type => "int" },
+ packetoffset => { name => "packetinterval", type => "int" },
+ transmittime => { name => "transmittime", type => "int" },
+ sendingoffset => { name => "offset", type => "int" },
+ sendingoffset_end => { name => "offset_end_snd", type => "int" },
+ receivingoffset_end => { name => "offset_end_rcv", type => "int" },
+ intervalcommon => { name => "interval_common", type => "int" },
+ verbose => { name => "verbose", type => "int" },
+ www_map => { name => "map", type => "int" },
+ alert => { name => "alert", type => "int" },
+);
+
+
+my %cocfgs = ();
+
+if (@codomains) {
+ print "\nInitializing co-domains ...\n\n";
+ foreach my $codomain (@codomains) {
+ next if exists $cocfgs{$codomain} || $codomain eq $domain;
+ # domain already loaded
+ print "Loading $codomain ...\n" if $verbose;
+ my $cfg = Hades::Config->new(configfile => $codomain, use_argv => 0);
+ $cfg->init() or die "Cannot load config \"$codomain\"!\n";
+ $cocfgs{$codomain} = $cfg;
+ push @codomains, @{$cfg->{config}->{codomains}};
+ }
+}
+
+
+# Since we have to run through the data (%routes) twice, it's better
+# to create a new list containing all necessary/evaluated data. Because we
+# need to go through all measurements sorted by interval length, we put
+# the data in an hash with the interval length as key. Since a lot of
+# measurements have the same interval length the values of the hash are
+# references to arrays.
+my %routes_list = ();
+
+
+print "\nGathering information from configuration files ...\n\n";
+
+my $codomain = 0; # First domain is not a co-domain!
+foreach my $cfg ($config, sort values(%cocfgs)) {
+ print "\nCo-domain \"$cfg->{config}->{domain}\" ...\n" if $codomain;
+ while (my ($sif, $recv_ref)= each %{$cfg->{config}->{routes}}) {
+ my $shost = $cfg->{config}->{interfaces}->{$sif}->{host};
+ my $sip = $cfg->{config}->{interfaces}->{$sif}->{ip};
+ my $sid = $sif;
+ my $shid = $cfg->hostid($shost);
+ unless (defined $shid) {
+ warn "No host provides interface $sif used as sender interface " .
+ "in \%routes\n in domain $cfg->{config}->{domain}" .
+ " - Skipping entry\n";
+ next;
+ }
+ # Map sender hosts and interfaces from co-domains to hosts and interfaces
+ # in main domain.
+ if ($codomain) {
+ # Use ssh ip to find common hosts.
+ my $nshid = $sship2hostid{ssh_ip($shost)};
+ print " Matching host: $shid -> $nshid\n"
+ if $debug && $shid && $nshid;
+ if ($config{plot_all}) {
+ $shid = $nshid || $shid;
+ } else {
+ $shid = $nshid;
+ }
+ # Use interface ip to find common interfaces.
+ unless ($host_based_scheduling) {
+ my $nsid = $ifip2ifid{$sip};
+ print " Matching interface: $sid -> $nsid\n"
+ if $debug && $sid && $nsid;
+ if ($config{plot_all}) {
+ $sid = $nsid || $sid;
+ } else {
+ $sid = $nsid;
+ }
+ }
+ }
+ while (my ($rif, $mids_ref)= each %{$recv_ref}) {
+ my $rhost = $cfg->{config}->{interfaces}->{$rif}->{host};
+ my $rip = $cfg->{config}->{interfaces}->{$rif}->{ip};
+ my $rid = $rif;
+ my $rhid = $cfg->hostid($rhost);
+ unless (defined $rhid) {
+ warn "No host provides interface $rif used as sender interface " .
+ "in \%routes\n in domain $cfg->{config}->{domain}" .
+ " - Skipping entry\n";
+ next;
+ }
+ # Map receiver hosts and interfaces from co-domains to hosts and
+ # interfaces in main domain.
+ # If neither sender nor receiver hosts are part of the
+ # main domain, we skip the route (unless "plot-all").
+ if ($codomain) {
+ # Use ssh ip to find common hosts.
+ my $nrhid = $sship2hostid{ssh_ip($rhost)};
+ print " Matching host: $rhid -> $nrhid\n"
+ if $debug && $rhid && $nrhid;
+ if ($config{plot_all}) {
+ $rhid = $nrhid || $rhid;
+ } else {
+ $rhid = $nrhid;
+ next unless defined $shid || defined $rhid;
+ }
+ # Use interface ip to find common interfaces.
+ unless ($host_based_scheduling) {
+ my $nrid = $ifip2ifid{$rip};
+ print " Matching interface: $rid -> $nrid\n"
+ if $debug && $rid && $nrid;
+ if ($config{plot_all}) {
+ $rid = $nrid || $rid;
+ } else {
+ $rid = $nrid;
+ }
+ }
+ }
+ for ( 0 .. $#{$mids_ref} ) {
+ my $mid = $_;
+ my $interval = $mids_ref->[$mid]->{interval};
+ $interval_common = lcm($interval_common,$interval);
+ my $info = {
+ %{$mids_ref->[$mid]}, # All data from central config
+ # Additionally helpful:
+ sender => $sif,
+ senderip => $sip,
+ shid => $shid,
+ sid => $sid, # eq $sender if $host_based_scheduling
+ receiver => $rif,
+ receiverip => $rip,
+ rhid => $rhid,
+ rid => $rid, # eq $receiver if $host_based_scheduling
+ mid => $mid,
+ dat_path => $rhost->{dat_path},
+ pid_path => $rhost->{pid_path},
+ datafile => "$sif.$rif.$mid.dat",
+ cfgsdir => $cfg->{config}->{cfgsdir},
+ domain => $cfg->{config}->{domain},
+ codomain => $codomain,
+ };
+ read_configfile($info);
+ $routes_list{$interval} = [] unless exists $routes_list{$interval};
+ push @{$routes_list{$interval}}, $info;
+ }
+ }
+ }
+ $codomain = 1; # Next domain is a co-domain
+}
+
+
+print "\nAdding cached scheduling and port information ...\n\n";
+
+foreach (values %routes_list) {
+ foreach my $info (@{$_}) {
+ # First the port:
+ if (defined $info->{port}) {
+ if ($fixed_source_ports) {
+ if (defined $info->{shid}) { # Possibly undef for co-domain routes
+ if ($used_ports{$info->{shid}}->{$info->{port}}) {
+ die(
+ ($info->{codomain} ?
+ "Co-domain $info->{domain} out of sync: " : "") .
+ "Port $info->{port} used at least twice on $info->{shid}!" .
+ " - Please fix it or delete file!\n"
+ );
+ }
+ $used_ports{$info->{shid}}->{$info->{port}} = 1; # Mark port as
used
+ }
+ }
+ if (defined $info->{rhid}) { # Possibly undef for co-domain routes
+ if ($used_ports{$info->{rhid}}->{$info->{port}}) {
+ die(
+ ($info->{codomain} ?
+ "Co-domain $info->{domain} out of sync: " : "") .
+ "Port $info->{port} used at least twice on $info->{rhid}!" .
+ " - Please fix it or delete file!\n"
+ );
+ }
+ $used_ports{$info->{rhid}}->{$info->{port}} = 1; # Mark port as used
+ }
+ }
+ # Now the scheduling:
+ next unless defined $info->{offset}; # We need a new one!
+ my $interval = $info->{interval};
+ my $offset = $info->{offset};
+ my $offset_end_snd = $info->{offset_end_snd};
+ my $offset_end_rcv = $info->{offset_end_rcv};
+ for ( my $i=1 ; $i <= $interval_common/$interval ; $i++ ) {
+ if (defined $info->{shid}) { # Possibly undef for co-domain routes
+ add_interval(
+ ($host_based_scheduling ? $info->{shid} : $info->{sid}),
+ $offset+$interval*($i-1), $offset_end_snd+$interval*($i-1))
+ or die "Cannot add cached interval for measurement:\n" .
+ " $info->{sender} -> $info->{receiver} ($info->{mid})\n" .
+ "Collision with previously added measurement!\n" .
+ "Either this is a severe internal error " .
+ "or domains are out of sync!\n";
+ }
+ if ($offset_end_rcv != 0 &&
+ defined $info->{rhid}) { # Possibly undef for co-domain routes
+ add_interval(
+ ($host_based_scheduling ? $info->{rhid} : $info->{rid}),
+ $offset+$interval*($i-1), $offset_end_rcv+$interval*($i-1))
+ or die "Cannot add cached interval for measurement:\n" .
+ " $info->{sender} -> $info->{receiver} ($info->{mid})\n" .
+ "Collision with previously added measurement!\n" .
+ "Either this is a severe internal error " .
+ "or domains are out of sync!\n";
+ }
+ }
+ }
+}
+
+
+print "\nCalculating missing data ...";
+
+# Now sort by interval length: shortest interval first to make comparison
+# easier!
+if ($update) {
+ print "\n\n";
+ foreach (sort {$a<=>$b} keys %routes_list) {
+ foreach my $i ( @{$routes_list{$_}} ) {
+ # First find a free port if necessary
+ unless (defined $i->{port}) {
+ print "New port: \t$i->{sender}\t$i->{receiver}\t$i->{mid}\n"
+ if $verbose;
+ for ($portbase .. $portmax) {
+ next if $fixed_source_ports && $used_ports{$i->{shid}}->{$_};
+ next if $used_ports{$i->{rhid}}->{$_};
+ $i->{port} = $_;
+ # mark port as used:
+ $used_ports{$i->{shid}}->{$_} = 1 if $fixed_source_ports;
+ $used_ports{$i->{rhid}}->{$_} = 1;
+ last;
+ }
+ die "No more free ports available!\n" unless defined $i->{port};
+ }
+ # Already an offset calculated?
+ next if defined $i->{offset};
+ print "New offset: \t$i->{sender}\t$i->{receiver}\t$i->{mid}\n"
+ if $verbose;
+ my $length_snd = $i->{groupsize} * $i->{packetinterval};
+ # => safety factor is one packet interval
+ my $length_rcv = $length_snd + $i->{transmittime};
+ if ( $i->{shid} eq $i->{rhid} ) {
+ $length_snd = $length_snd > $length_rcv ? $length_snd : $length_rcv;
+ $length_rcv = 0;
+ }
+ $i->{offset} = alloc_interval(
+ ($host_based_scheduling ?
+ ($i->{shid}, $i->{rhid}) : ($i->{sid}, $i->{rid})),
+ $i->{interval}, $length_snd, $length_rcv
+ );
+ die "No more appropriate space left for interval!\n" .
+ "Route: " . $i->{sender} . "->" . $i->{receiver} . "\n" .
+ "Interval: " . $i->{interval} . "\n" .
+ "S-Length: " . $length_snd . "\nR-Length: " . $length_rcv . "\n"
+ if $i->{offset} == -1;
+ $i->{offset_end_snd} = $i->{offset} + $length_snd-1;
+ $i->{offset_end_rcv} = $length_rcv != 0 ? $i->{offset} + $length_rcv-1
: 0;
+ }
+ }
+} else {
+ print " SKIPPED\n\n";
+}
+
+
+print "\nGenerating ploticus interval diagram ...";
+
+if (!defined $config{plot}) {
+ print " SKIPPED\n\n";
+} else {
+ print "\n\n";
+ plot_interval(
+ file => $config{plot},
+ interval => $interval_common,
+ intervals => \%intervals,
+ );
+}
+
+
+print "\nWriting configuration files ...";
+
+if ($dryrun || !$update) {
+ print " SKIPPED\n\n";
+} else {
+ print "\n\n";
+ mkDir($cfgsdir);
+ foreach (values %routes_list) {
+ foreach (@{$_}) {
+ write_configfile($_);
+ }
+ }
+}
+
+
+print "\nDeleting old configuration files ...";
+
+if ($delete) {
+ print " - not really (dry-run)" if $dryrun;
+ print "\n\n";
+ while (<$cfgsdir/*>) {
+ next unless -f;
+ next if $configfiles{$_};
+ print " $_\n";
+ unless ($dryrun) {
+ if (unlink($_) < 1) {
+ warn "Cannot delete $_: $!\n";
+ }
+ }
+ }
+} else {
+ print " SKIPPED\n\n";
+}
+
+
+print "\nSyncing configuration files ...";
+
+if ($sync && !$dryrun) {
+ print "\n\n";
+ my @rsync_command = ( $config{rsync_path},
+ "--compress", "--times", "--copy-links", "--recursive", "--delete",
+ "--rsh=$config{rsync_rsh}",
+ );
+ push @rsync_command, "--verbose" if $verbose;
+ push @rsync_command, "--stats" if $debug;
+ foreach my $hostname (sort keys %hosts) {
+ my $ip = ssh_ip($hosts{$hostname});
+ next unless $ip;
+ my $files = $cfgfiles_by_host{$hostname};
+ unless (@{$files}) {
+ warn "No configuration file copied to $hostname!\n";
+ next;
+ }
+ print "$hostname ($ip) ...\n";
+ rmtree("$cfgsdir/$hostname");
+ mkDir("$cfgsdir/$hostname");
+ foreach (@$files) {
+ symlink $_->[0],"$cfgsdir/$hostname/".$_->[1]
+ or warn "Cannot create symlink: $!\n";
+ }
+ my ($out,$err);
+ run(
+
[@rsync_command,
+ "$cfgsdir/$hostname/", "$ip:$hosts{$hostname}->{cfg_path}"],
+ \undef, \$out, \$err, timer(30)
+ #TODO timeout() throws exception, but timer() does nothing during
run()
+ ) or print " RSYNC FAILED (exit value $?)\n";
+ print " $out" if $out;
+ print " ERROR $err" if $err;
+ }
+} else {
+ print " SKIPPED\n\n";
+}
+
+
+
+print "\nReloading Hades configuration files ...";
+
+if ($reload && !$dryrun) {
+ print "\n\n";
+ foreach my $hostname (sort keys %hosts) {
+ my $ip = ssh_ip($hosts{$hostname});
+ next unless $ip;
+ print "$hostname ($ip) ...\n";
+ my $command = "/etc/init.d/hades reload ; " .
+ "service hades-traceroute status || service hades-traceroute start";
+ # This is NOT necessary for a reload, but useful for
hades-check-cron.sh
+ print " $command\n" if $verbose;
+ if ($config{sshmode} eq "ssh") {
+ system "$sshcommand -l root $ip \'$command\'";
+ if ($? == -1) {
+ print "SSH EXECUTION FAILED: $!\n";
+ } elsif ($? & 127) {
+ printf "SSH DIED with signal %d, %s coredump\n",
+ ($? & 127), ($? & 128) ? 'with' : 'without';
+ } elsif ($? >> 8 != 0) {
+ printf "SSH EXITED with value %d\n", $? >> 8;
+ }
+ } else {
+ my $result = ssh_cmd($hosts{$hostname},\%ssh_args,$command);
+ if (!defined $result) {
+ warn ssh_err_msg();
+ } elsif ($result!=0) {
+ warn "error (remote command): " . $result . "\n"; #TODO error message
+ }
+ }
+ }
+} else {
+ print " SKIPPED\n\n";
+}
+
+
+exit 0;
+
+
+
+### END OF MAIN ###
+
+
sub gcd {
    # Greatest common divisor of two non-negative integers, computed with
    # the iterative Euclidean algorithm.
    #
    # Parameters: ($a, $b) - non-negative integers.
    # Returns:    gcd($a, $b); by convention gcd($n, 0) == $n.
    #
    # BUG FIX: the original do/until loop evaluated "$a % $b" before ever
    # testing $b, so a call with $b == 0 died with "Illegal modulus zero".
    # The guard-first while loop below returns $a in that case instead.
    my ($a, $b) = @_;

    while ($b != 0) {
        ($a, $b) = ($b, $a % $b);
    }
    return $a;
}
+
+#sub gcd {
+# return gcd($_[0] - $_[1], $_[1]) if ($_[0] > $_[1]);
+# return gcd($_[0], $_[1] - $_[0]) if ($_[0] < $_[1]);
+# $_[0];
+#}
+
sub lcm {
    # Least common multiple of two integers: the product divided by the
    # greatest common divisor. Dividing before multiplying keeps the
    # intermediate value small; the result is identical for integer input.
    my ($x, $y) = @_;

    return ($x / gcd($x, $y)) * $y;
}
+
+
# No check for end-of-interval
sub add_interval {
    # Insert the busy interval [$begin, $end] into the sorted singly linked
    # list kept in the global %intervals for $host.
    # Returns 1 on success; returns nothing (false) when the new interval
    # would overlap an already-allocated one.
    my ($host, $begin, $end) = @_;

    my $node = $intervals{$host};   # list head; undef for an empty list
    my $pred = undef;               # will be the last node with begin <= $end
    while (defined($node) && $node->{begin} <= $end) {
        ($pred, $node) = ($node, $node->{next});
    }
    # The list is sorted and overlap-free, so only the predecessor can
    # collide with the new interval.
    return if defined($pred) && $pred->{end} >= $begin;

    my $fresh = { begin => $begin, end => $end, next => $node };
    if (defined $pred) {
        $pred->{next} = $fresh;
    } else {
        $intervals{$host} = $fresh; # becomes the new list head
    }
    return 1;
}
+
sub alloc_interval {
    # Search for a start offset at which a sending slot of $length_snd for
    # $sender and a receiving slot of $length_rcv for $receiver are both free
    # within the period $interval, and reserve that slot repeatedly up to the
    # global $interval_common (so routes with different intervals stay
    # collision-free over the common period). Reservations go into the
    # global %intervals via add_interval().
    #
    # Returns the chosen start offset, or -1 when no slot can be found.
    my ($sender,$receiver,$interval,$length_snd,$length_rcv) = @_;
    print "alloc $sender $receiver\n" if $debug;

    # First find enough empty space for sender
    my $cur_int_snd = $intervals{$sender};
    my $prev_int_snd = undef;
    my $cur_int_rcv = $intervals{$receiver};
    my $prev_int_rcv = undef;
    my $min_begin = 0;
    # NOTE(review): this outer bound uses $length_rcv although it limits the
    # SENDER search, and the inner receiver loop below bounds on $length_snd;
    # the two look swapped - confirm intended semantics before changing.
    while ( $min_begin+$length_rcv < $interval ) {
        if ( !defined($cur_int_snd) ||
             ($cur_int_snd->{begin} - $min_begin + 1 >= $length_snd) ) {
            # We found a free candidate for the sender!
            my $max_end = $cur_int_snd ? $cur_int_snd->{begin}-1 : $interval;
            print "Sender:min/max $min_begin/$max_end\n" if $debug;
            my $begin = undef; # Set appropriately if found in receiver
            if ( $length_rcv == 0 ) {
                # Only one interval to allocate (most likely sender==receiver)
                print "Sender==Receiver -> skipping alloc of receiver\n" if $debug;
                $begin = $min_begin;
            } else {
                #if (defined $cur_int_snd->{next}) {
                #  # not last entry => check all intervals and recalculate $begin
                #  # test the "other" intervals!!!
                #  # not necessary if they are sorted!!????
                #}
                # Search receiver for empty space of $length_rcv in
                # $min_begin..$max_end
                #
                # First go through list to reach $min_begin
                while ( defined($cur_int_rcv) # More to go?
                        && $cur_int_rcv->{begin} < $min_begin+$length_rcv-1) {
                    $prev_int_rcv = $cur_int_rcv;
                    $cur_int_rcv = $cur_int_rcv->{next};
                }
                # Now look for an appropriate interval up to $max_end;
                # start no earlier than both $min_begin and the end of the
                # receiver's preceding reservation.
                my $min_begin_rcv =
                    defined($prev_int_rcv) ?
                        ( $prev_int_rcv->{end}+1 > $min_begin ?
                            $prev_int_rcv->{end}+1 : $min_begin )
                        : $min_begin;
                print "Receiver:min $min_begin_rcv\n" if $debug;
                while ( $min_begin_rcv+$length_snd-1 <= $max_end ) {
                    if ( !defined($cur_int_rcv) ||
                         ($cur_int_rcv->{begin} - $min_begin_rcv + 1 >= $length_rcv) ) {
                        # SUCCESS!
                        $begin = $min_begin_rcv;
                        # Allocate this slot in every repetition of $interval
                        # within the common period.
                        for ( my $i=1 ; $i <= $interval_common/$interval ; $i++ ) {
                            print "Receiver:interval $begin+$interval*" . ($i-1) . "\n"
                                if $debug;
                            add_interval($receiver,
                                         $begin+$interval*($i-1),
                                         $begin+$interval*($i-1)+$length_rcv-1)
                                or die "Internal Error"; # Interval was checked before!!!
                        }
                        last;
                    }
                    $prev_int_rcv = $cur_int_rcv;
                    $cur_int_rcv = $cur_int_rcv->{next};
                    $min_begin_rcv = $prev_int_rcv->{end}+1;
                    # We assume the "next" slot as first
                }
            }
            if (defined($begin)) {
                # Found an interval and allocated it for receiver.
                # Now allocate all intervals for sender:
                for ( my $i=1 ; $i <= $interval_common/$interval ; $i++ ) {
                    print "Sender:interval $begin+$interval*" . ($i-1) . "\n" if
                        $debug;
                    add_interval($sender,
                                 $begin+$interval*($i-1),
                                 $begin+$interval*($i-1)+$length_snd-1)
                        or die "Internal Error"; # Interval was checked before!!!
                }
                print "alloc result $begin\n" if $debug;
                return $begin;
            }
        }
        if (!defined($cur_int_snd)) {
            # We are already at the end of free space of sender and nothing
            # appropriate was found for receiver => FAIL
            last;
        }
        $prev_int_snd = $cur_int_snd;
        $cur_int_snd = $cur_int_snd->{next};
        $min_begin = $prev_int_snd->{end}+1; # We assume the "next" slot as first
    }
    # It is not possible to find enough space in interval
    print "alloc FAILED\n" if $debug;
    return -1;
}
+
+
sub parse_configfile {
    # Parse one hadesd per-route configuration file consisting of
    # "variable = value" lines. The keys of the returned hash are the
    # internal names taken from the global %cfg_vars mapping.
    #
    # Returns the filled hash as a list, or the EMPTY list when the file
    # cannot be opened - so "my %h = parse_configfile($f)" yields an empty
    # hash and the caller's "unless (%h)" error check fires.
    my $file = shift;

    # The following hash will be filled during configfile read. All variables
    # should not be undef, because of "use warnings".
    my %info = (
        dat_path => "",
        pid_path => "",
        datafile => "",
        senderip => "",
        receiverip => "",
        sender => "",
        receiver => "",
        mid => 0,
        packetsize => 0,
        precedence => "",
        interval => 0,
        groupsize => 0,
        packetinterval => 0,
        transmittime => 0,
        verbose => 0,
        # The following values are calculated and have to be created if they
        # do not exist or perhaps if one of the other values changed.
        # They should (of course) not have default values.
        #port => undef,
        #offset => undef,
        #offset_end_snd => undef,
        #offset_end_rcv => undef,
        #interval_common => undef,
    );
    # Three-argument open with a lexical handle (the original used an
    # injection-prone two-argument open on the bareword INFILE).
    my $fh;
    unless (open $fh, '<', $file) {
        warn "SYSTEM ERROR: Unable to open file $file: $!\n";
        # BUG FIX: the original "return undef" produced a ONE-element list in
        # the caller's list context, i.e. a non-empty hash, so the caller's
        # "unless (%read_info)" never detected the failure. A bare return
        # yields the empty list.
        return;
    }
    while (my $line = <$fh>) {
        chomp $line;              # no newline
        #s/#.*//;                 # no comments #### TODO REALLY REALLY BAD !!!!
        $line =~ s/^\s+//;        # no leading white
        $line =~ s/\s+$//;        # no trailing white
        next unless length $line; # anything left?
        my ($var,$value) = split /\s*=\s*/,$line,2;
        # Check with "exists" first so the lookup does not autovivify
        # entries in the global %cfg_vars for unknown variable names.
        if (exists $cfg_vars{$var} && defined $cfg_vars{$var}->{name}) {
            $info{$cfg_vars{$var}->{name}} = $value;
        } else {
            warn "Unknown variable in config file $file: $var = " .
                (defined $value ? $value : "") . "\n";
        }
    }
    close($fh);
    #
    # Special data processing
    #
    $info{dat_path} =~ s#/*$## if defined $info{dat_path}; # strip trailing slashes
    $info{pid_path} =~ s#/*$## if defined $info{pid_path};
    $info{interval} *= 1e6 if defined $info{interval};     # seconds -> microseconds

    return %info;
}
+
sub read_configfile {
    # Read the existing per-route configuration file for one measurement
    # (described by the hashref $info) and merge its values into %$info,
    # preferring the newly computed values over those read from disk.
    #
    # Returns 1 on success, 0 when the file is missing or unreadable (then
    # $info->{rewrite} is set so the file gets (re)created). For routes that
    # belong to a co-domain any inconsistency is fatal, because this script
    # must not modify a co-domain's files.
    # Side effects: registers the file in the globals %configfiles and
    # %cfgfiles_by_host, may set $info->{rewrite}/{offset}, and widens the
    # global $interval_common.
    my ($info) = @_;

    my $mid = $info->{mid};
    my $sender = $info->{sender};
    my $shid = $info->{shid};         # sender host id (key into %cfgfiles_by_host)
    my $receiver = $info->{receiver};
    my $rhid = $info->{rhid};         # receiver host id (key into %cfgfiles_by_host)
    my $cfgsdir = $info->{cfgsdir};
    my $domain = $info->{domain};
    my $codomain = $info->{codomain}; # Are we included as co-domain?
    #
    # Read file
    #
    my $configfile_base = "$sender.$receiver.$mid.cfg";
    my $configfile = "$cfgsdir/$configfile_base";
    if ($codomain) {
        die "Missing config file from co-domain $domain ($configfile)" .
            " - Please fix!\n" unless -e $configfile;
    } else {
        $configfiles{$configfile} = 1; # Mark file as ok
        # Add files to rsync list for sender and receiver:
        push @{$cfgfiles_by_host{$shid}},[$configfile,"S.$configfile_base"];
        push @{$cfgfiles_by_host{$rhid}},[$configfile,"R.$configfile_base"];
        unless (-e $configfile) {
            $info->{rewrite} = 1; # not really a *re*write ;-)
            return 0;
        }
    }
    print "Reading config:\t$sender\t$receiver\t$mid\n" if $verbose;
    my %read_info = parse_configfile($configfile);
    unless (%read_info) {
        if ($codomain) {
            die "Cannot read config file from co-domain $domain ($configfile)" .
                " - Please fix!\n";
        }
        $info->{rewrite} = 1;
        return 0;
    }
    #
    # Verify read data
    #
    if ($sender ne $read_info{sender}) {
        die "Sender name in filename \"$configfile\" differs from " .
            "sender name read from file! - Please fix it or delete file!\n";
    }
    if ($receiver ne $read_info{receiver}) {
        die "Receiver name in filename \"$configfile\" differs from " .
            "receiver name read from file! - Please fix it or delete file!\n";
    }
    # Self-measurements (sender == receiver) must run over loopback.
    if ( $sender eq $receiver &&
         (( $read_info{senderip} ne "127.0.0.1" ) ||
          ( $read_info{receiverip} ne "127.0.0.1" )) ) {
        print " Sender equal receiver and IP not 127.0.0.1\n" if $verbose;
        $read_info{rewrite} = 1;
        $read_info{senderip} = "127.0.0.1";
        $read_info{receiverip} = "127.0.0.1";
    }
    # Some variables have to be checked for changed values. There's just one
    # difference between strings and numbers: ne vs. !=
    # The names in the list are keys for %cfg_vars.
    foreach ( qw(datadir pidpath datafile senderip receiverip mid precedence
                 packetsize interval packetgroupsize packetoffset
                 verbose transmittime) ) {
        if (
            ($cfg_vars{$_}->{type} eq "string" &&
             $read_info{$cfg_vars{$_}->{name}} ne $info->{$cfg_vars{$_}->{name}})
            ||
            ($cfg_vars{$_}->{type} eq "int" &&
             $read_info{$cfg_vars{$_}->{name}} != $info->{$cfg_vars{$_}->{name}})
           ) {
            if ($codomain) {
                # NOTE(review): other die messages in this sub interpolate
                # $domain here; $codomain looks like a flag - confirm which
                # variable is intended.
                die "Config file $configfile from co-domain $codomain out of sync" .
                    " - Please fix!\n";
            }
            print " Changed variable: $_/$cfg_vars{$_}->{name} = " .
                $read_info{$cfg_vars{$_}->{name}} . " -> " .
                $info->{$cfg_vars{$_}->{name}} . "\n"
                if $verbose;
            $read_info{rewrite} = 1; # Mark configfile for rewriting
        }
    }
    if ($read_info{interval} != $info->{interval} ||
        $read_info{groupsize} != $info->{groupsize} ||
        $read_info{packetinterval} != $info->{packetinterval} ||
        $read_info{transmittime} != $info->{transmittime}) {
        # offset needs to be recalculated
        if ($codomain) {
            die "Config file $configfile from co-domain $codomain out of sync" .
                " - Please fix!\n";
        }
        $read_info{offset} = undef;
    }
    #
    # read data is OK -> use data and inform about it
    #
    %$info = (
        %read_info, # First "old" config
        %$info, # Then "new" config (later keys win in a hash assignment)
    );
    unless (defined $info->{port}) {
        if ($codomain) {
            die "Config file $configfile from co-domain $domain out of sync" .
                " - Please fix!\n";
        }
        print " New port will be assigned\n" if $verbose;
        $info->{rewrite} = 1;
    }
    unless (defined $info->{offset} &&
            defined $info->{offset_end_snd} && defined $info->{offset_end_rcv}) {
        if ($codomain) {
            die "Config file $configfile from co-domain $domain out of sync" .
                " - Please fix!\n";
        }
        print " New sending offset will be assigned\n" if $verbose;
        $info->{rewrite} = 1;
        $info->{offset} = undef;
    }
    # Keep the global common period a multiple of every route's own interval.
    $interval_common = lcm($interval_common,$info->{interval_common})
        if defined $info->{interval_common};
    print " Config will be rewritten\n" if $verbose && $info->{rewrite};
    return 1;
}
+
sub write_configfile {
    # Write the per-route hadesd configuration file for one measurement,
    # but only when the in-memory data was marked dirty ($info->{rewrite}).
    #
    # Returns 1 on success (or when nothing had to be done), 0 when the
    # file cannot be opened for writing.
    my ($info) = @_;

    return 1 unless $info->{rewrite};

    my $mid = $info->{mid};
    my $sender = $info->{sender};
    my $receiver = $info->{receiver};

    print "Writing config:\t$sender\t$receiver\t$mid\n" if $verbose;
    my $configfile = "$cfgsdir/$sender.$receiver.$mid.cfg";
    # Three-argument open with a lexical handle (the original used a
    # two-argument open on the bareword OUTFILE).
    my $fh;
    unless (open $fh, '>', $configfile) {
        warn "SYSTEM ERROR: Unable to open file ${configfile}: $!\n";
        return 0;
    }
    # The names in the list are keys for %cfg_vars.
    foreach my $var ( qw(datadir pidpath datafile senderip receiverip
                         sendername receivername mid port packetsize precedence
                         interval packetgroupsize packetoffset
                         sendingoffset
                         verbose
                         transmittime sendingoffset_end receivingoffset_end) ) {
        my $value = $info->{$cfg_vars{$var}->{name}};
        if ( $cfg_vars{$var}->{type} eq "string") {
            # Path settings are written with a trailing slash.
            $value .= "/" if ($var eq "datadir") || ($var eq "pidpath");
            print $fh "$var=$value\n";
        } elsif ( $cfg_vars{$var}->{type} eq "int") {
            # Interval is held in microseconds internally (see
            # parse_configfile) but written in seconds.
            $value /= 1e6 if $var eq "interval";
            printf $fh "$var=%i\n", $value;
        } else {
            # BUG FIX: the original interpolated $cfg_vars{$_} here, but $_
            # is not set inside a "foreach my $var" loop (and it indexed
            # %$info by the type string). Report the offending type directly.
            die "Internal error: Unknown datatype $cfg_vars{$var}->{type}";
        }
    }
    print $fh "intervalcommon=$interval_common\n";
    # Buffered write errors (e.g. full disk) only surface at close.
    close $fh
        or warn "SYSTEM ERROR: Error writing file ${configfile}: $!\n";

    return 1;
}
+
sub plot_interval {
    # Emit a Ploticus script that visualizes the allocated measurement
    # intervals per host as horizontal bars.
    #
    # Named parameters:
    #   file      - output file name; a false value means STDOUT
    #   interval  - upper bound of the x axis (length of the common period)
    #   intervals - hashref: host name => linked list of {begin,end,next}
    my %p = (
        file => "&STDOUT",
        interval => 0,
        intervals => {},
        @_
    );
    $p{file} = "&STDOUT" unless $p{file}; # false value => STDOUT
    # Three-argument open with a lexical handle. The special value
    # "&STDOUT" must keep its old two-argument dup semantics, which the
    # '>&' mode reproduces.
    my $pl;
    my $opened = ($p{file} eq "&STDOUT")
        ? open($pl, '>&', \*STDOUT)
        : open($pl, '>', $p{file});
    unless ($opened) {
        # BUG FIX: the original "or print STDERR '...', return;" evaluated
        # the return while building print's argument list, so the error
        # message was never actually printed.
        print STDERR "Cannot open $p{file}: $!\n";
        return;
    }
    print $pl <<__EOPL1__;
#proc page
  pagesize: 20 15
__EOPL1__
    print $pl "#proc getdata\n data: ";
    foreach my $host (sort keys %{$p{intervals}}) {
        # Ploticus fields are whitespace-separated, so blanks in host names
        # must be replaced.
        (my $hostname = $host) =~ s/\s+/_/g;
        for (my $entry = $p{intervals}->{$host} ;
             defined $entry ;
             $entry = $entry->{next}) {
            print $pl " $hostname $entry->{begin} $entry->{end}\n";
        }
    }
    print $pl <<__EOPL2__;

#proc categories
  axis: y
  datafield: 1

#proc areadef
  rectangle: 2 1 14 10
  autoheight: 0.2
  xrange: 0 $p{interval}
  yscaletype: categories
  yaxis.stubs: usecategories
  xaxis.stubs: inc 1 1000000

#proc bars
  axis: x
  locfield: 1
  segmentfields: 2 3
  color: black
  outline: no
__EOPL2__

    close $pl;
}
+
+__END__
+
+
+
+=head1 NAME
+
+B<hades-cfg-create.pl> - TODO
+
+=head1 SYNOPSIS
+
+B<hades-cfg-create.pl> S<[B<--help>]> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]> S<[B<--delete>]> S<[B<--sync>]> S<[B<--reload>]>
+ S<[B<--dry-run>]> S<[B<--all>]>
+ S<[B<--sshuser>=I<USER>]> S<[B<--[no]interactive>]>
+ S<[B<--plot>[=F<PLOTFILE>]]> S<[B<--[no]plot-all>]>
+
+
+
+=head1 DESCRIPTION
+
+TODO
+
+
+=head1 OPTIONS
+
+Nearly all options have built-in defaults that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+take precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--dry-run>
+
+Do everything except making any permanent changes. You can even use the
+parameters B<--delete>, B<--sync> and B<--reload> to get more output.
+
+Note: At the moment many steps are simply skipped; additional checks that
+would be possible in dry-run mode are not yet performed.
+
+Configuration file: none
+
+Default: false
+
+
+=item B<--[no]update>
+
+Update configuration files in the local directory. The default behaviour is
+to read in the configuration files for the routes and compare them to the
+global configuration file. If B<--dry-run> is not specified, the newly
+calculated data is written to the files. By using B<--noupdate> you prevent
+this script from comparing the existing routes configuration to the global
+configuration file. The configuration files are also not updated.
+
+This parameter should be used with caution! It is designed for making it
+possible to copy/sync and (re)start/stop (B<--sync>/B<--reload>) manually
+modified routes configurations. Be especially careful with the parameter
+B<--delete>!
+
+Configuration file: none
+
+Default: true
+
+
+=item B<--delete>
+
+Delete configuration files in local directory if they are not used anymore.
+
+Configuration file: none
+
+Default: false
+
+
+=item B<--sync>
+
+Copy/sync configuration files to remote hosts.
+
+Configuration file: none
+
+Default: false
+
+
+=item B<--reload>
+
+Sync instances on remote hosts (e.g. start/stop if necessary).
+
+Configuration file: none
+
+Default: false
+
+
+=item B<--all>
+
+Force B<--delete>, B<--sync> and B<--reload>.
+
+Configuration file: none
+
+Default: false
+
+
+=item B<--sshuser>=I<USER>
+
+Use I<USER> as user for ssh.
+
+Configuration file: ssh_args
+
+Default: current user
+
+
+=item B<--[no]sshinteractive>
+
+Set SSH mode to interactive (e.g. password prompt!).
+See also L<Net::SSH::Perl>.
+
+Configuration file: none
+
+Default: enabled
+
+
+=item B<--plot>[=F<PLOTFILE>]
+
+If set, print a Ploticus plot file to F<PLOTFILE>. If only B<--plot> is used,
+plot will be written to F<stdout>. That is most likely not what you want!
+
+Configuration file: none
+
+Default: not set
+
+
+=item B<--[no]plot-all>
+
+Include all hosts from all co-domains in plot generated using S<B<--plot>>.
+Default is to only include hosts from the main domain.
+
+Important: If hosts are part of the main domain and co-domains, the name from
+the main domain will be used! Hosts that are in more than one co-domain, but
+not in the main domain, will be displayed as separate hosts!
+
+Configuration file: none
+
+Default: not set
+
+
+=item TODO
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+> hades-cfg-create.pl --verbose
+
+> hades-cfg-create.pl --sync --reload
+
+> hades-cfg-create.pl --dry-run --all
+
Property changes on: trunk/build/HADES/bin/hades-cfg-create.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-cfg-gui.pl
===================================================================
--- trunk/build/HADES/bin/hades-cfg-gui.pl (rev
0)
+++ trunk/build/HADES/bin/hades-cfg-gui.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,1136 @@
+#!/usr/bin/perl
+
+#TODO
+# - Rework (at least) the graphical part using Glade
+# - "Save as" does not change location for further "Save" actions...
+# - Enable creation of new configuration file by using
+# lib/Hades/Config/template.conf
+# - Would it be better to use only PPI? At least in some places?
+# Would be quite a lot of rework...
+
+use warnings;
+use strict;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+use Gtk2 '-init';
+use Gtk2::SimpleMenu;
+use Data::Dumper;
+use Pod::Usage;
+
+use Hades;
+use Hades::Config::FileSaver;
+
+
create_config() or die;

# Do not always use the config hash, instead set useful variables
# (%config is presumably exported by the Hades module - confirm).
my $debug = $config{debug};
$Hades::Config::FileSaver::debug = $debug;

my $config = $Hades::config;
my $saver = Hades::Config::FileSaver->new(config => $config);
my $cfg = $config->{config};   # the part of the config edited by this GUI

# Script-wide state; presumably filled by init() (defined elsewhere).
my $configfile;      # currently loaded configuration file
my %hosts;           # host definitions
my @hosts;           # host names in display order
my %routes;          # measurement routes
my %routes_default;  # defaults for new routes

# Widget geometry constants (pixels).
#my $measurement_width = 100;
my $measurement_val_width = 120;
my $measurement_val_height = 23;
my $button_height = 30;
my $hostlist_width = 200;
my $hostlist_height = 100;
my $iflist_height = 20;

# Main window.
my $window = Gtk2::Window->new;
#$window->set_size_request(760,500);
$window->set_title ('HADES IP performance measurement configuration');
$window->signal_connect (destroy => sub { Gtk2->main_quit; });
$window->set_border_width(3);

# Notebook with three pages: Measurements, Configuration, Defaults.
my $notebook = Gtk2::Notebook->new;
$window->add($notebook);

my $table = Gtk2::Table->new (3, 3, 0);
$notebook->append_page($table, "Measurements");
$table->set_row_spacings(5);
$table->set_col_spacings(5);

my $table_configuration = Gtk2::Table->new (2, 2, 0);
$notebook->append_page($table_configuration, "Configuration");

my $table_defaults = Gtk2::Table->new (2, 2, 0);
$notebook->append_page($table_defaults, "Defaults");

# Widgets of the "Measurements" page; (re)created by the redraw_* subs below.
my $swindow_sender = Gtk2::ScrolledWindow->new;
my $swindow_s_if = Gtk2::ScrolledWindow->new;
my $swindow_receiver = Gtk2::ScrolledWindow->new;
my $swindow_r_if = Gtk2::ScrolledWindow->new;
my $table_measurement = Gtk2::Table->new(2,10,0);
my $fileselection;
my $label_measurement = Gtk2::Label->new ("ID:");
my $label_packetsize = Gtk2::Label->new ("Packet Size:");
my $spinbutton_measurement = Gtk2::SpinButton->new(Gtk2::Adjustment->new (0, 0, 1, 1, 10, 10),1,0);
my $spinbutton_packetsize = Gtk2::SpinButton->new(Gtk2::Adjustment->new (0, 40, 1500, 1, 10, 10),1,0);
my $label_interval = Gtk2::Label->new ("Interval:");
my $spinbutton_interval = Gtk2::SpinButton->new(Gtk2::Adjustment->new (0, 0, 1e9, 1, 10, 10),1,0);
my $label_groupsize = Gtk2::Label->new ("Groupsize:");
my $spinbutton_groupsize = Gtk2::SpinButton->new(Gtk2::Adjustment->new (0, 1, 100, 1, 10, 10),1,0);
my $label_packetinterval = Gtk2::Label->new ("Packetinterval:");
my $spinbutton_packetinterval = Gtk2::SpinButton->new(Gtk2::Adjustment->new (0, 0, 1e6, 1, 10, 10),1,0);
my $label_transmittime = Gtk2::Label->new ("Transmittime:");
my $spinbutton_transmittime = Gtk2::SpinButton->new(Gtk2::Adjustment->new (0, 0, 1e6, 1, 10, 10),1,0);
my $label_precedence = Gtk2::Label->new("Precedence:");
my $combo_precedence = Gtk2::Combo->new();
my $label_alert = Gtk2::Label->new("Alert:");
my $combo_alert = Gtk2::Combo->new();
my $label_verbose = Gtk2::Label->new ("Verbose:");
my $combo_verbose = Gtk2::Combo->new();
my $remove = Gtk2::Button->new_from_stock('gtk-delete');

# Pop-up windows and entry fields for interface/host editing.
my $if_window = Gtk2::Window->new;
my $new_if_name;
my $new_if_ip;
my $new_if_alias;
my $host_window = Gtk2::Window->new;
my $host_name_entry;
my $host_ip_entry;
my ($ssh_user_entry, $ssh_ident_entry, $ssh_proto_entry);
my $log_path_entry;

# Current selection state of the four lists on the Measurements page.
my ($active_r_if, $active_s_if);
my ($liststore_sender, $liststore_receiver, $liststore_r_if, $liststore_s_if);
my ($active_sender, $active_receiver, $active_measurement);
my $if_host;

init();

# Build the initial view.
redraw_sender();
redraw_s_if();
redraw_receiver();
redraw_r_if();
redraw_measurement();

# Bottom button row of the "Measurements" page.
my $save_button = Gtk2::Button->new_from_stock ('gtk-save');
$save_button->set_size_request($measurement_val_width,$button_height);
$table->attach_defaults($save_button,2,3,10,11);
$save_button->signal_connect( clicked => \&save);

my $fileopen_button = Gtk2::Button->new_from_stock ('gtk-open');
$fileopen_button->set_size_request($measurement_val_width,$button_height);
$table->attach_defaults($fileopen_button,3,4,10,11);
$fileopen_button->signal_connect( clicked => \&fileopen);

my $save_as_button = Gtk2::Button->new_from_stock ('gtk-save-as');
$save_as_button->set_size_request($measurement_val_width,$button_height);
$table->attach_defaults($save_as_button,2,3,11,12);
$save_as_button->signal_connect( clicked => \&save_as);

my $quit_button = Gtk2::Button->new_from_stock ('gtk-quit');
$quit_button->set_size_request($measurement_val_width,$button_height);
$table->attach_defaults($quit_button,3,4,11,12);
$quit_button->signal_connect( clicked => sub {Gtk2->main_quit;});

redraw_configuration();
redraw_defaults();

# Show everything and enter the Gtk2 main loop.
$window->show_all;
Gtk2->main;
+
+
+
sub redraw_sender {
    # Rebuild the "Sender" host list (top left of the Measurements page):
    # destroy the old scrolled window, fill a fresh ListStore from @hosts,
    # re-attach it to the main table, and refresh the dependent
    # sender-interface list.
    $swindow_sender->destroy;
    $swindow_sender = Gtk2::ScrolledWindow->new;
    $swindow_sender->set_size_request($hostlist_width,$hostlist_height);
    $swindow_sender->set_policy ('automatic', 'automatic');
    $liststore_sender = Gtk2::ListStore->new('Glib::String');
    foreach my $host (@hosts) {
#    foreach $host (keys %{$cfg->{hosts}}) {
        my $iter = $liststore_sender->append;
        $liststore_sender->set($iter, 0 => $host);
    }
    my $treeview_sender = Gtk2::TreeView->new_with_model($liststore_sender);
    $treeview_sender->set_reorderable(1);
    # Selection change -> senderbutton_action() updates $active_sender.
    $treeview_sender->get_selection->signal_connect(changed => \&senderbutton_action);

    $swindow_sender->add($treeview_sender);
    my $renderer = Gtk2::CellRendererText->new;
    my $column = Gtk2::TreeViewColumn->new_with_attributes ("Sender",$renderer,text => 0);
    $column->set_clickable(1);
    # Clicking the column header opens the add/delete/edit host menu.
    $column->signal_connect(clicked => \&sender_column_action);
    $treeview_sender->append_column ($column);

    # Preselect the first row.
    my $treepath = Gtk2::TreePath->new_first;
    $treeview_sender->set_cursor_on_cell ($treepath, $column, $renderer, 0);

    $table->attach_defaults($swindow_sender,0,1,0,9);

    redraw_s_if();
}
+
+
sub sender_column_action {
    # Header-click handler for the "Sender" column: pop up a context menu
    # offering add/delete/edit of the currently selected host.
    my $menu = Gtk2::Menu->new;
    my @entries = (
        [ "Add host",    sub { host("") }                ],
        [ "Delete host", sub { del_host($active_sender) } ],
        [ "Edit host",   sub { host($active_sender) }     ],
    );
    for my $entry (@entries) {
        my ($label, $callback) = @$entry;
        my $item = Gtk2::MenuItem->new($label);
        $item->signal_connect('activate', $callback);
        $menu->append($item);
        $item->show();
    }
    $menu->popup(undef, undef, undef, undef, 1, 0);
}
+
sub senderbutton_action {
    # Selection-changed handler for the sender list: remember the chosen
    # host and refresh its interface list.
    my ($selection) = @_;
    $active_sender = $liststore_sender->get($selection->get_selected);
    redraw_s_if();
}
+
sub redraw_s_if {
    # Rebuild the interface list of the currently selected sender host
    # (below the sender list) and reset the measurement view.
    $swindow_s_if->destroy;
    $swindow_s_if = Gtk2::ScrolledWindow->new;
    $swindow_s_if->set_size_request($hostlist_width,$iflist_height);
    $swindow_s_if->set_policy ('automatic', 'automatic');
    $liststore_s_if = Gtk2::ListStore->new('Glib::String');
    my @s_if = ();
    if (exists $cfg->{hosts}{$active_sender}) {
        @s_if = keys %{$cfg->{hosts}{$active_sender}{"interfaces"}};
    }
    foreach my $if (@s_if) {
        my $iter = $liststore_s_if->append;
        $liststore_s_if->set($iter, 0 => $if);
    }
    my $treeview_s_if = Gtk2::TreeView->new_with_model($liststore_s_if);
    $treeview_s_if->set_reorderable(1);
    # Selection change -> s_if_action() updates $active_s_if.
    $treeview_s_if->get_selection->signal_connect(changed => \&s_if_action);
    $swindow_s_if->add($treeview_s_if);
    my $renderer = Gtk2::CellRendererText->new;
    my $column = Gtk2::TreeViewColumn->new_with_attributes ("Interfaces",$renderer,text => 0);
    $column->set_clickable(1);
    # Header click -> interface/measurement maintenance menu.
    $column->signal_connect(clicked => \&s_if_column_action);
    $treeview_s_if->append_column ($column);

    my $treepath = Gtk2::TreePath->new_first;
    $treeview_s_if->set_cursor_on_cell ($treepath, $column, $renderer, 0);
    # Default to the first interface (hash key order, i.e. unspecified).
    $active_s_if = $s_if[0];

    $table->attach_defaults($swindow_s_if,0,1,9,12);
    $window->show_all;
    # Interface changed -> measurement selection restarts at ID 0.
    $active_measurement = 0;
    redraw_measurement();
}
+
sub s_if_column_action {
    # Header-click menu for the sender interface list: interface
    # maintenance plus bulk adding/removing of measurements for the
    # currently selected sender.
    my $menu = Gtk2::Menu->new;
    my @entries = (
        [ "Add interface",    sub { if_add($active_sender) }                 ],
        [ "Delete interface", sub { if_del($active_sender,$active_s_if) }    ],
        [ "Edit interface",   sub { if_edit($active_sender,$active_s_if) }   ],
        [ "Add measurements to all receivers",        \&add_default_sender   ],
        [ "Delete all measurements from this sender", \&remove_sender        ],
    );
    for my $entry (@entries) {
        my ($label, $callback) = @$entry;
        my $item = Gtk2::MenuItem->new($label);
        $item->signal_connect('activate', $callback);
        $menu->append($item);
        $item->show();
    }
    $menu->popup(undef, undef, undef, undef, 1, 0);
}
+
sub s_if_action {
    # Selection-changed handler for the sender interface list: remember the
    # chosen interface and show its measurements from ID 0 on.
    my ($selection) = @_;
    $active_s_if = $liststore_s_if->get($selection->get_selected);
    $active_measurement = 0;
    redraw_measurement();
}
+
sub redraw_receiver {
    # Rebuild the "Receiver" host list (mirror image of redraw_sender) and
    # refresh the dependent receiver-interface list.
    $swindow_receiver->destroy;
    $swindow_receiver = Gtk2::ScrolledWindow->new;
    $swindow_receiver->set_size_request($hostlist_width,$hostlist_height);
    $swindow_receiver->set_policy ('automatic', 'automatic');
    $liststore_receiver = Gtk2::ListStore->new('Glib::String');
    my $iter;
    foreach my $host (@hosts) {
        $iter = $liststore_receiver->append;
        $liststore_receiver->set($iter, 0 => $host);
    }
    my $treeview_receiver = Gtk2::TreeView->new_with_model($liststore_receiver);
    $treeview_receiver->set_reorderable(1);
    # Selection change -> receiverbutton_action() updates $active_receiver.
    $treeview_receiver->get_selection->signal_connect(changed => \&receiverbutton_action);

    $swindow_receiver->add($treeview_receiver);
    my $renderer = Gtk2::CellRendererText->new;
    my $column = Gtk2::TreeViewColumn->new_with_attributes ("Receiver",$renderer,text => 0);
    $column->set_clickable(1);
    # Clicking the column header opens the add/delete/edit host menu.
    $column->signal_connect(clicked => \&receiver_column_action);
    $treeview_receiver->append_column ($column);

    # Preselect the first row.
    my $treepath = Gtk2::TreePath->new_first;
    $treeview_receiver->set_cursor_on_cell ($treepath, $column, $renderer, 0);

    $table->attach_defaults($swindow_receiver,1,2,0,9);
    redraw_r_if();
}
+
sub receiver_column_action {
    # Header-click handler for the "Receiver" column: pop up a context menu
    # offering add/delete/edit of the currently selected host.
    my $menu = Gtk2::Menu->new;
    my @entries = (
        [ "Add host",    sub { host("") }                  ],
        [ "Delete host", sub { del_host($active_receiver) } ],
        [ "Edit host",   sub { host($active_receiver) }     ],
    );
    for my $entry (@entries) {
        my ($label, $callback) = @$entry;
        my $item = Gtk2::MenuItem->new($label);
        $item->signal_connect('activate', $callback);
        $menu->append($item);
        $item->show();
    }
    $menu->popup(undef, undef, undef, undef, 1, 0);
}
+
sub receiverbutton_action {
    # Selection-changed handler for the receiver list: remember the chosen
    # host and refresh its interface list.
    my ($selection) = @_;
    $active_receiver = $liststore_receiver->get($selection->get_selected);
    redraw_r_if();
}
+
sub redraw_r_if {
    # Rebuild the interface list of the currently selected receiver host
    # (mirror image of redraw_s_if) and reset the measurement view.
    $swindow_r_if->destroy;
    $swindow_r_if = Gtk2::ScrolledWindow->new;
    $swindow_r_if->set_size_request($hostlist_width,$iflist_height);
    $swindow_r_if->set_policy ('automatic', 'automatic');
    $liststore_r_if = Gtk2::ListStore->new('Glib::String');
    my @r_if = ();
    if (exists $cfg->{hosts}{$active_receiver}) {
        @r_if = keys %{$cfg->{hosts}{$active_receiver}{"interfaces"}};
    }
    my $iter;
    foreach my $if (@r_if) {
        $iter = $liststore_r_if->append;
        $liststore_r_if->set($iter, 0 => $if);
    }
    my $treeview_r_if = Gtk2::TreeView->new_with_model($liststore_r_if);
    $treeview_r_if->set_reorderable(1);
    # Selection change -> r_if_action() updates $active_r_if.
    $treeview_r_if->get_selection->signal_connect(changed => \&r_if_action);
    $swindow_r_if->add($treeview_r_if);
    my $renderer = Gtk2::CellRendererText->new;
    my $column = Gtk2::TreeViewColumn->new_with_attributes ("Interfaces",$renderer,text => 0);
    $column->set_clickable(1);
    # Header click -> interface/measurement maintenance menu.
    $column->signal_connect(clicked => \&r_if_column_action);
    $treeview_r_if->append_column ($column);

    my $treepath = Gtk2::TreePath->new_first;
    $treeview_r_if->set_cursor_on_cell ($treepath, $column, $renderer, 0);
    # Default to the first interface (hash key order, i.e. unspecified).
    $active_r_if = $r_if[0];

    $table->attach_defaults($swindow_r_if,1,2,9,12);
    $window->show_all;
    # Interface changed -> measurement selection restarts at ID 0.
    $active_measurement = 0;
    redraw_measurement();
}
+
sub r_if_column_action {
    # Header-click menu for the receiver interface list: interface
    # maintenance plus bulk adding/removing of measurements for the
    # currently selected receiver.
    my $menu = Gtk2::Menu->new;
    my @entries = (
        [ "Add interface",    sub { if_add($active_receiver) }               ],
        [ "Delete interface", sub { if_del($active_receiver,$active_r_if) }  ],
        [ "Edit interface",   sub { if_edit($active_receiver,$active_r_if) } ],
        [ "Add measurements from all senders",          \&add_default_receiver ],
        [ "Delete all measurements to this receiver",   \&remove_receiver      ],
    );
    for my $entry (@entries) {
        my ($label, $callback) = @$entry;
        my $item = Gtk2::MenuItem->new($label);
        $item->signal_connect('activate', $callback);
        $menu->append($item);
        $item->show();
    }
    $menu->popup(undef, undef, undef, undef, 1, 0);
}
+
sub r_if_action {
    # Selection-changed handler for the receiver interface list: remember
    # the chosen interface and show its measurements from ID 0 on.
    my ($selection) = @_;
    $active_r_if = $liststore_r_if->get($selection->get_selected);
    $active_measurement = 0;
    redraw_measurement();
}
+
sub redraw_measurement {
    # Rebuild the measurement panel (table columns 2-4) for the currently
    # selected sender/receiver interface pair.  If the pair has at least
    # one measurement, one editor widget per parameter is created for the
    # measurement selected by $active_measurement; otherwise the rows are
    # blanked.  An "add" button is offered in both cases.
    my @meas = ();
    if (defined $active_s_if && defined $active_r_if &&
        ref($cfg->{routes}{$active_s_if}{$active_r_if}) eq "ARRAY"
    ) {
        @meas = @{$cfg->{routes}{$active_s_if}{$active_r_if}};
    }
    # Destroy the widgets of the previous redraw before recreating them.
    foreach my $widget (($label_measurement, $spinbutton_measurement,
        $label_packetsize, $spinbutton_packetsize, $label_interval,
        $spinbutton_interval, $label_groupsize, $spinbutton_groupsize,
        $label_packetinterval, $spinbutton_packetinterval, $label_transmittime,
        $spinbutton_transmittime, $label_precedence, $combo_precedence,
        $label_alert, $combo_alert, $label_verbose, $combo_verbose, $remove)) {
        $widget->destroy();
    }
    if ($#meas > -1) {
        my $m = $meas[$active_measurement];

        # Measurement selector: index into the measurement list.
        $label_measurement = Gtk2::Label->new("ID:");
        $spinbutton_measurement = Gtk2::SpinButton->new(
            Gtk2::Adjustment->new($active_measurement, 0, $#meas, 1, 10, 10),
            1, 0);
        $spinbutton_measurement->set_size_request($measurement_val_width,
            $measurement_val_height);
        $spinbutton_measurement->set_name(
            "$active_s_if.$active_r_if.activemeasurement");
        $spinbutton_measurement->signal_connect(
            "value-changed" => \&measurement_action);
        $table->attach_defaults($label_measurement, 2, 3, 0, 1);
        $table->attach_defaults($spinbutton_measurement, 3, 4, 0, 1);

        $label_packetsize = Gtk2::Label->new("Packet Size:");
        $spinbutton_packetsize = Gtk2::SpinButton->new(
            Gtk2::Adjustment->new($$m{"packetsize"}, 40, 1500, 1, 10, 10),
            1, 0);
        $spinbutton_packetsize->set_size_request($measurement_val_width,
            $measurement_val_height);
        $spinbutton_packetsize->set_name(
            "$active_s_if.$active_r_if.$active_measurement");
        $spinbutton_packetsize->signal_connect(
            "value-changed" => \&packetsize_action);
        $table->attach_defaults($label_packetsize, 2, 3, 1, 2);
        $table->attach_defaults($spinbutton_packetsize, 3, 4, 1, 2);

        $label_interval = Gtk2::Label->new("Interval:");
        $spinbutton_interval = Gtk2::SpinButton->new(
            Gtk2::Adjustment->new($$m{"interval"}, 0, 1e9, 1, 1000000,
                1000000), 1, 0);
        $spinbutton_interval->set_size_request($measurement_val_width,
            $measurement_val_height);
        $spinbutton_interval->set_name("$active_s_if.$active_r_if.interval");
        $spinbutton_interval->signal_connect(
            "value-changed" => \&interval_action);
        $table->attach_defaults($label_interval, 2, 3, 2, 3);
        $table->attach_defaults($spinbutton_interval, 3, 4, 2, 3);

        $label_groupsize = Gtk2::Label->new("Groupsize:");
        $spinbutton_groupsize = Gtk2::SpinButton->new(
            Gtk2::Adjustment->new($$m{"groupsize"}, 1, 100, 1, 10, 10), 1, 0);
        $spinbutton_groupsize->set_size_request($measurement_val_width,
            $measurement_val_height);
        $spinbutton_groupsize->set_name("$active_s_if.$active_r_if.groupsize");
        $spinbutton_groupsize->signal_connect(
            "value-changed" => \&groupsize_action);
        $table->attach_defaults($label_groupsize, 2, 3, 3, 4);
        $table->attach_defaults($spinbutton_groupsize, 3, 4, 3, 4);

        $label_packetinterval = Gtk2::Label->new("Packetinterval:");
        $spinbutton_packetinterval = Gtk2::SpinButton->new(
            Gtk2::Adjustment->new($$m{"packetinterval"}, 0, 1e6, 1, 1000,
                1000), 1, 0);
        $spinbutton_packetinterval->set_size_request($measurement_val_width,
            $measurement_val_height);
        $spinbutton_packetinterval->set_name(
            "$active_s_if.$active_r_if.packetinterval");
        $spinbutton_packetinterval->signal_connect(
            "value-changed" => \&packetinterval_action);
        $table->attach_defaults($label_packetinterval, 2, 3, 4, 5);
        $table->attach_defaults($spinbutton_packetinterval, 3, 4, 4, 5);

        $label_transmittime = Gtk2::Label->new("Transmittime:");
        $spinbutton_transmittime = Gtk2::SpinButton->new(
            Gtk2::Adjustment->new($$m{"transmittime"}, 0, 1e6, 1, 1000, 1000),
            1, 0);
        $spinbutton_transmittime->set_size_request($measurement_val_width,
            $measurement_val_height);
        $spinbutton_transmittime->set_name(
            "$active_s_if.$active_r_if.transmittime");
        $spinbutton_transmittime->signal_connect(
            "value-changed" => \&transmittime_action);
        $table->attach_defaults($label_transmittime, 2, 3, 5, 6);
        $table->attach_defaults($spinbutton_transmittime, 3, 4, 5, 6);

        $label_precedence = Gtk2::Label->new("Precedence:");
        $combo_precedence = Gtk2::Combo->new();
        $combo_precedence->set_size_request($measurement_val_width,
            $measurement_val_height);
        $combo_precedence->set_popdown_strings ("0x0","0xb8");
        $combo_precedence->entry->set_text($$m{"precedence"});
        $combo_precedence->entry->set_name(
            "$active_s_if.$active_r_if.precedence");
        $combo_precedence->entry->signal_connect(
            "activate" => \&precedence_action);
        $combo_precedence->entry->signal_connect(
            "focus-out-event" => \&precedence_action);
        $table->attach_defaults($label_precedence, 2, 3, 6, 7);
        $table->attach_defaults($combo_precedence, 3, 4, 6, 7);

        $label_alert = Gtk2::Label->new("Alert:");
        $combo_alert = Gtk2::Combo->new();
        $combo_alert->set_size_request($measurement_val_width,
            $measurement_val_height);
        $combo_alert->set_popdown_strings ("no", "yes");
        $combo_alert->entry->set_text($$m{"alert"} ? "yes" : "no");
        $combo_alert->entry->set_name("$active_s_if.$active_r_if.alert");
        $combo_alert->entry->signal_connect("activate" => \&alert_action);
        $combo_alert->entry->signal_connect(
            "focus-out-event" => \&alert_action);
        $table->attach_defaults($label_alert, 2, 3, 7, 8);
        $table->attach_defaults($combo_alert, 3, 4, 7, 8);

        $label_verbose = Gtk2::Label->new("Verbose:");
        $combo_verbose = Gtk2::Combo->new();
        $combo_verbose->set_size_request($measurement_val_width,
            $measurement_val_height);
        $combo_verbose->set_popdown_strings ("no", "yes");
        $combo_verbose->entry->set_text($$m{"verbose"} ? "yes" : "no");
        # Bug fix: this entry was named "...alert" (copy/paste from the
        # alert combo above); it now gets its own ".verbose" name.
        $combo_verbose->entry->set_name("$active_s_if.$active_r_if.verbose");
        $combo_verbose->entry->signal_connect("activate" => \&verbose_action);
        $combo_verbose->entry->signal_connect(
            "focus-out-event" => \&verbose_action);
        $table->attach_defaults($label_verbose, 2, 3, 8, 9);
        $table->attach_defaults($combo_verbose, 3, 4, 8, 9);

        # Delete button for the selected measurement.
        $remove = Gtk2::Button->new_from_stock('gtk-delete');
        $remove->set_size_request($measurement_val_width, $button_height);
        $table->attach_defaults($remove, 2, 3, 9, 10);
        $remove->signal_connect(clicked => \&remove_mea);
    } else {
        # No measurements for this route: blank the parameter rows.
        $active_measurement = -1;
        for (my $i = 0; $i <= 8; $i++) {
            my $label = Gtk2::Label->new("");
            $label->set_size_request($measurement_val_width,
                $measurement_val_height);
            $table->attach_defaults($label, 2, 3, $i, $i + 1);
        }
    }
    # "Add measurement" is available whether or not measurements exist.
    my $add = Gtk2::Button->new_from_stock('gtk-add');
    $add->set_size_request($measurement_val_width, $button_height);
    $table->attach_defaults($add, 3, 4, 9, 10);
    $add->signal_connect(clicked => \&add_mea);

    $window->show_all;
}
+
# --- Per-measurement setter callbacks --------------------------------------
# Each callback writes the widget's current value into the measurement hash
# selected by ($active_s_if, $active_r_if, $active_measurement).  The combo
# entry handlers return 0 so the "focus-out-event" signal keeps propagating.

sub packetsize_action {
    my $widget = shift;
    $cfg->{routes}{$active_s_if}{$active_r_if}[$active_measurement]
        {packetsize} = $widget->get_value();
}

sub interval_action {
    my $widget = shift;
    $cfg->{routes}{$active_s_if}{$active_r_if}[$active_measurement]
        {interval} = $widget->get_value();
}

sub groupsize_action {
    my $widget = shift;
    $cfg->{routes}{$active_s_if}{$active_r_if}[$active_measurement]
        {groupsize} = $widget->get_value();
}

sub packetinterval_action {
    my $widget = shift;
    $cfg->{routes}{$active_s_if}{$active_r_if}[$active_measurement]
        {packetinterval} = $widget->get_value();
}

sub transmittime_action {
    my $widget = shift;
    $cfg->{routes}{$active_s_if}{$active_r_if}[$active_measurement]
        {transmittime} = $widget->get_value();
}

sub precedence_action {
    my $widget = shift;
    $cfg->{routes}{$active_s_if}{$active_r_if}[$active_measurement]
        {precedence} = $widget->get_text();
    return 0;
}

sub alert_action {
    my $widget = shift;
    # Stored as 1/0; the combo shows "yes"/"no".
    $cfg->{routes}{$active_s_if}{$active_r_if}[$active_measurement]
        {alert} = $widget->get_text() eq "yes" ? 1 : 0;
    return 0;
}

sub verbose_action {
    my $widget = shift;
    # Stored as 1/0; the combo shows "yes"/"no".
    $cfg->{routes}{$active_s_if}{$active_r_if}[$active_measurement]
        {verbose} = $widget->get_text() eq "yes" ? 1 : 0;
    return 0;
}
+
# --- Bulk route helpers ----------------------------------------------------
# Operate on every interface of every known host, relative to the currently
# selected sender ($active_s_if) or receiver ($active_r_if) interface.

sub add_default_sender {
    # Add a default measurement from the active sender interface to every
    # other interface.
    foreach my $host (@hosts) {
        foreach my $if (keys %{ $cfg->{hosts}{$host}{"interfaces"} }) {
            next if $if eq $active_s_if;
            push @{ $cfg->{routes}{$active_s_if}{$if} }, {%routes_default};
        }
    }
    redraw_measurement();
}

sub add_default_receiver {
    # Add a default measurement from every other interface to the active
    # receiver interface.
    foreach my $host (@hosts) {
        foreach my $if (keys %{ $cfg->{hosts}{$host}{"interfaces"} }) {
            next if $if eq $active_r_if;
            push @{ $cfg->{routes}{$if}{$active_r_if} }, {%routes_default};
        }
    }
    redraw_measurement();
}

sub remove_sender {
    # Drop every route originating at the active sender interface.
    foreach my $host (@hosts) {
        foreach my $if (keys %{ $cfg->{hosts}{$host}{"interfaces"} }) {
            delete $cfg->{routes}{$active_s_if}{$if}
                if exists $cfg->{routes}{$active_s_if}{$if};
        }
    }
    redraw_measurement();
}

sub remove_receiver {
    # Drop every route ending at the active receiver interface.
    foreach my $host (@hosts) {
        foreach my $if (keys %{ $cfg->{hosts}{$host}{"interfaces"} }) {
            delete $cfg->{routes}{$if}{$active_r_if}
                if exists $cfg->{routes}{$if}{$active_r_if};
        }
    }
    redraw_measurement();
}
+
sub host {
    # Open the "Host" editor window, pre-filled either with the global
    # host defaults (when $host_name is the empty string) or with the
    # stored settings of the named host.  The entry widgets are file-level
    # globals so host_add_ok() can read them back.
    my $host_name = shift;
    my ($ssh_user, $ip, $ssh_ident, $ssh_proto, $log_path);
    if ($host_name eq "") {
        my $def = $cfg->{hosts_default};
        $ssh_user  = $def->{ssh_args}{user};
        $ssh_ident = $def->{ssh_args}{identity_files};
        $ssh_proto = $def->{ssh_args}{protocol};
        $log_path  = $def->{log_path};
        # Fix: the defaults carry no IP; initialize to "" so that
        # set_text() below is not called with undef.
        $ip = "";
    } else {
        my $h = $cfg->{hosts}{$host_name};
        $ssh_user  = $h->{ssh_args}{user};
        $ssh_ident = $h->{ssh_args}{identity_files};
        $ssh_proto = $h->{ssh_args}{protocol};
        $ip        = $h->{ip};
        $log_path  = $h->{log_path};
        # NOTE(review): the original also copied bin/cfg/pid/dat paths and
        # %interfaces here, but none of them were used in this window.
    }
    $host_window = Gtk2::Window->new;
    $host_window->set_title ('Host');
    $host_window->set_border_width(3);
    my $host_table = Gtk2::Table->new (2, 2, 1);

    my $host_name_label = Gtk2::Label->new ("Host name:");
    $host_table->attach_defaults($host_name_label, 0, 1, 0, 1);

    $host_name_entry = Gtk2::Entry->new_with_max_length(50);
    $host_name_entry->set_text($host_name);
    $host_table->attach_defaults($host_name_entry, 1, 2, 0, 1);

    my $host_ip_label = Gtk2::Label->new ("IP:");
    $host_table->attach_defaults($host_ip_label, 0, 1, 1, 2);

    $host_ip_entry = Gtk2::Entry->new_with_max_length(20);
    $host_ip_entry->set_text($ip);
    $host_table->attach_defaults($host_ip_entry, 1, 2, 1, 2);

    my $ssh_user_label = Gtk2::Label->new ("SSH user:");
    $host_table->attach_defaults($ssh_user_label, 0, 1, 2, 3);

    $ssh_user_entry = Gtk2::Entry->new_with_max_length(20);
    $ssh_user_entry->set_text($ssh_user);
    $host_table->attach_defaults($ssh_user_entry, 1, 2, 2, 3);

    my $ssh_ident_label = Gtk2::Label->new ("SSH identities:");
    $host_table->attach_defaults($ssh_ident_label, 0, 1, 3, 4);
    # Identity files are stored as an array ref; shown comma-separated.
    # (join replaces the former hand-built sprintf format string and copes
    # with a missing/empty list.)
    my $ssh_ident_str = join ",", @{ $ssh_ident || [] };
    $ssh_ident_entry = Gtk2::Entry->new_with_max_length(200);
    $ssh_ident_entry->set_text($ssh_ident_str);
    $host_table->attach_defaults($ssh_ident_entry, 1, 2, 3, 4);

    my $ssh_proto_label = Gtk2::Label->new ("SSH protocol:");
    $host_table->attach_defaults($ssh_proto_label, 0, 1, 4, 5);

    $ssh_proto_entry = Gtk2::SpinButton->new(Gtk2::Adjustment->new
        ($ssh_proto, 1, 2, 1, 1, 1), 1, 0);
    $ssh_proto_entry->set_text($ssh_proto);
    $host_table->attach_defaults($ssh_proto_entry, 1, 2, 4, 5);

    my $log_path_label = Gtk2::Label->new ("Log path:");
    $host_table->attach_defaults($log_path_label, 0, 1, 5, 6);

    $log_path_entry = Gtk2::Entry->new_with_max_length(20);
    $log_path_entry->set_text($log_path);
    $host_table->attach_defaults($log_path_entry, 1, 2, 5, 6);

    my $cancel_button = Gtk2::Button->new_from_stock ('gtk-cancel');
    $host_table->attach_defaults($cancel_button, 0, 1, 6, 7);
    $cancel_button->signal_connect(clicked => sub {$host_window->destroy();
#       $window->set_accept_focus(1);
    });

    my $ok_button = Gtk2::Button->new_from_stock ('gtk-ok');
    $host_table->attach_defaults($ok_button, 1, 2, 6, 7);
    $ok_button->signal_connect(clicked => sub {host_add_ok(0)}); #TODO ???

    $host_window->add($host_table);
    $host_window->show_all;
}
+
sub host_add_ok {
    # OK handler of the host dialog.  $change distinguishes "edit existing
    # host" (true) from "add new host" (false): adding an already existing
    # host pops up an error window, while editing replaces the stored host.
    my $change = shift;
    my $new_host_name = $host_name_entry->get_text;
    if ($new_host_name eq "") {
        # No name entered: silently keep the dialog open.
        return;
    }
    if (exists $cfg->{hosts}{$new_host_name} and not $change) {
        # Duplicate name on "add": show a modal-style error popup.
        my $window = Gtk2::Window->new;
        my $vbox = Gtk2::VBox->new(0,10);
        my $label = Gtk2::Label->new("Host $new_host_name already existent!");
        my $ok_button = Gtk2::Button->new_from_stock ('gtk-ok');
        $ok_button->signal_connect(clicked => sub {$window->destroy});
        $vbox->set_border_width(10);
        $vbox->pack_start_defaults($label);
        $vbox->pack_end_defaults($ok_button);
        $window->add($vbox);
        $window->show_all;
        return;
    } else {
        # Replace the host: del_host() also removes all routes touching
        # its interfaces, then the entry is rebuilt from the dialog fields.
        # Empty fields are simply omitted from the new host hash.
        del_host($new_host_name);
        #$cfg->{hosts}{$new_host_name} = {%hosts_default}; TODO ???
        $cfg->{hosts}{$new_host_name} = {};
        ${$cfg->{hosts}{$new_host_name}}{ip} =
            $host_ip_entry->get_text if $host_ip_entry->get_text ne "";
        ${$cfg->{hosts}{$new_host_name}}{ssh_args}{user} =
            $ssh_user_entry->get_text if $ssh_user_entry->get_text ne "";
        # Identity files are entered comma-separated, stored as array ref.
        my @ssh_ident = split ",", $ssh_ident_entry->get_text;
        ${$cfg->{hosts}{$new_host_name}}{ssh_args}{protocol} =
            $ssh_proto_entry->get_value_as_int;
        ${$cfg->{hosts}{$new_host_name}}{ssh_args}{identity_files} =
            \@ssh_ident;

        ${$cfg->{hosts}{$new_host_name}}{log_path} =
            $log_path_entry->get_text if $log_path_entry->get_text ne "";

#       interface($host_name,$host_name,$host_ip_entry->get_text,"");
    }
    # Refresh all panes from the modified config and close the dialog.
    init();
    $host_window->destroy();
#   $window->set_accept_focus(1)
}
+
sub if_add {
    # "Add interface" for the given host: open an empty interface dialog.
    my $host = shift;
    interface($host, "", "", "");
}
+
sub interface {
    # Open the "Interface" editor window for $host, pre-filled with the
    # interface's name, IP, and alias.  The entry widgets push their values
    # into the $new_if_* globals via their signal handlers; if_add_ok()
    # commits them.
    my $host = shift;
    my $if = shift;
    my $ip = shift;
    my $alias = shift;
    # Fix: callers (e.g. if_edit on a sparse config entry) may pass undef;
    # default to "" so Gtk2::Entry->set_text is never called with undef.
    $ip = "" unless defined $ip;
    $alias = "" unless defined $alias;
    $if_host = $host;
    # A brand-new interface defaults its name to the host name.
    if ($if eq "") {$if = $host;}
    $if_window = Gtk2::Window->new;
    $if_window->set_title ('Interface');
    $if_window->set_border_width(3);
    my $if_table = Gtk2::Table->new (2, 2, 1);

    my $host_label = Gtk2::Label->new ("Host:");
    $if_table->attach_defaults($host_label, 0, 1, 0, 1);

    my $host_name = Gtk2::Label->new ($host);
    $if_table->attach_defaults($host_name, 1, 2, 0, 1);

    my $if_name_label = Gtk2::Label->new ("Interface name:");
    $if_table->attach_defaults($if_name_label, 0, 1, 1, 2);

    my $if_name_entry = Gtk2::Entry->new_with_max_length(50);
    $if_name_entry->signal_connect( "activate", \&if_name_entry_action);
    $if_name_entry->signal_connect( "focus-out-event", \&if_name_entry_action);
    $if_name_entry->set_text($if);
    $if_table->attach_defaults($if_name_entry, 1, 2, 1, 2);

    my $if_ip_label = Gtk2::Label->new ("Interface IP:");
    $if_table->attach_defaults($if_ip_label, 0, 1, 2, 3);

    my $if_ip_entry = Gtk2::Entry->new_with_max_length(20);
    $if_ip_entry->signal_connect( "activate", \&if_ip_entry_action);
    $if_ip_entry->signal_connect( "focus-out-event", \&if_ip_entry_action);
    $if_ip_entry->set_text($ip);
    $if_table->attach_defaults($if_ip_entry, 1, 2, 2, 3);

    my $if_alias_label = Gtk2::Label->new ("Interface alias:");
    $if_table->attach_defaults($if_alias_label, 0, 1, 3, 4);

    my $if_alias_entry = Gtk2::Entry->new_with_max_length(20);
    $if_alias_entry->signal_connect( "activate", \&if_alias_entry_action);
    $if_alias_entry->signal_connect( "focus-out-event",
        \&if_alias_entry_action);
    $if_alias_entry->set_text($alias);
    $if_table->attach_defaults($if_alias_entry, 1, 2, 3, 4);

    my $cancel_button = Gtk2::Button->new_from_stock ('gtk-cancel');
    $if_table->attach_defaults($cancel_button, 0, 1, 4, 5);
    $cancel_button->signal_connect(clicked => sub {$if_window->destroy();
#       $window->set_accept_focus(1);
    });

    my $ok_button = Gtk2::Button->new_from_stock ('gtk-ok');
    $if_table->attach_defaults($ok_button, 1, 2, 4, 5);
    $ok_button->signal_connect(clicked => \&if_add_ok);

    $if_window->add($if_table);
#   $window->set_accept_focus(0);#???????????????????????????????????
    $if_window->show_all;
}
+
# The three entry handlers below stash the current entry text in globals
# that if_add_ok() reads when the dialog is confirmed.  Each returns 0 so
# the "focus-out-event" keeps propagating.

sub if_name_entry_action {
    my $entry = shift;
    $new_if_name = $entry->get_text;
    return 0;
}

sub if_ip_entry_action {
    my $entry = shift;
    $new_if_ip = $entry->get_text;
    return 0;
}

sub if_alias_entry_action {
    my $entry = shift;
    $new_if_alias = $entry->get_text;
    return 0;
}
+
sub if_add_ok {
    # Commit the interface dialog: store IP and alias under the entered
    # interface name, refresh both interface lists, close the window.
    $cfg->{hosts}{$if_host}{"interfaces"}{$new_if_name} = {
        ip        => $new_if_ip,
        shortname => $new_if_alias,
    };
    redraw_s_if();
    redraw_r_if();
    $if_window->destroy();
#   $window->set_accept_focus(1)
}
+
sub if_edit {
    # Open the interface dialog pre-filled with the stored IP and alias
    # of an existing interface.
    my ($host, $if) = @_;
    my $data = $cfg->{hosts}{$host}{"interfaces"}{$if};
    interface($host, $if, $data->{ip}, $data->{shortname});
}
+
sub if_del {
    # Remove interface $if of $host: first drop every route that uses it
    # as sender or receiver, then the interface entry itself.
    my ($host, $if) = @_;
    foreach my $sender (keys %{ $cfg->{routes} }) {
        foreach my $receiver (keys %{ $cfg->{routes}{$sender} }) {
            delete $cfg->{routes}{$sender}{$receiver}
                if $sender eq $if or $receiver eq $if;
        }
    }
    # Also remove the (now empty) sender-level hash keyed by $if.
    delete $cfg->{routes}{$if};
    delete $cfg->{hosts}{$host}{"interfaces"}{$if};
    redraw_s_if();
    redraw_r_if();
}
+
sub del_host {
    # Delete a host: remove each of its interfaces (which also purges the
    # routes touching them) and then the host entry itself.  Redraws all
    # panes via init() even when the host did not exist.
    my $host = shift;
    if (exists $cfg->{hosts}{$host}) {
        if_del($host, $_) for keys %{ $cfg->{hosts}{$host}{"interfaces"} };
        delete $cfg->{hosts}{$host};
    }
    init();
}
+
sub remove_mea {
    # Delete the currently selected measurement of the active route and
    # move the selection to the previous one.
    splice @{ $cfg->{routes}{$active_s_if}{$active_r_if} },
        $active_measurement, 1;
    %routes = %{ $cfg->{routes} };
    $active_measurement--;
    # Fix: removing element 0 used to leave the index at -1 even though
    # measurements remain, so the panel displayed $meas[-1] (the last
    # element) while the ID spin button showed 0.  Clamp to 0 instead.
    $active_measurement = 0
        if $active_measurement < 0
        and @{ $cfg->{routes}{$active_s_if}{$active_r_if} };
    redraw_measurement();
}

sub add_mea {
    # Append a measurement built from the defaults and select it.
    push @{ $cfg->{routes}{$active_s_if}{$active_r_if} }, {%routes_default};
    %routes = %{ $cfg->{routes} };
    # Fix: jump to the appended element (the last index) instead of blindly
    # incrementing, which selected the wrong entry unless the previous
    # selection was already the last one.
    $active_measurement = $#{ $cfg->{routes}{$active_s_if}{$active_r_if} };
    redraw_measurement();
}
+
sub measurement_action {
    # "value-changed" handler of the measurement-ID spin button: switch to
    # the chosen measurement and redraw the panel.
    # (The unused $name = $id->get_name() of the original was removed.)
    my $spin = shift;
    $active_measurement = $spin->get_value();
    redraw_measurement();
}
+
# --- File dialogs ----------------------------------------------------------

sub save_as {
    # "Save as": let the user pick a target file; OK triggers saveas_ok().
    $fileselection = Gtk2::FileSelection->new ("Save new HADES config file");
    $fileselection->set_select_multiple (0);
    $fileselection->ok_button->signal_connect(clicked => \&saveas_ok);
    $fileselection->cancel_button->signal_connect(
        clicked => sub { $fileselection->destroy });
    $fileselection->show_all();
}

sub saveas_ok {
    # Remember the chosen file name, update the title/config page, save.
    my ($chosen) = $fileselection->get_filename;
    $configfile = $chosen;
    configfile2name();
    save();
    $fileselection->destroy;
}

sub fileopen {
    # "Open": let the user pick an existing config file.
    $fileselection = Gtk2::FileSelection->new ("Open HADES config file");
    $fileselection->set_select_multiple (0);
    $fileselection->ok_button->signal_connect(clicked => \&file_ok);
    $fileselection->cancel_button->signal_connect(clicked => \&file_cancel);
    $fileselection->show_all();
}

sub file_ok {
    # Load the selected file through Hades::Config and refresh the GUI.
    # Silently ignores selections that are not plain files.
    my ($file) = $fileselection->get_selections;
    if (-f $file) {
        $config = Hades::Config->new(configfile => $file, use_argv => 0);
        $config->init(@_);
        $saver = Hades::Config::FileSaver->new(config => $config);
        $cfg = $config->{config};
        init();
    }
    $fileselection->destroy;
}

sub file_cancel {
    $fileselection->destroy;
}
+
sub init {
    # Pull working copies of the loaded configuration into the file-level
    # globals and redraw every pane of the GUI.
    $configfile = $cfg->{configfile};
    configfile2name();
    %hosts = %{ $cfg->{hosts} };
    @hosts = sort { $a cmp $b } keys %hosts;
    %routes = %{ $cfg->{routes} };
    %routes_default = %{ $cfg->{routes_default} };
    redraw_sender();
    redraw_s_if();
    redraw_receiver();
    redraw_r_if();
    redraw_measurement();
}
+
sub configfile2name {
    # Historic name-extraction code, kept for reference:
#    if ($configfile =~ m/ipqos[-_](\w+)\.(conf|cfg)/) {
#        $configname = $1;
#    }
    # NOTE(review): this stores the file name under the key "config",
    # while init() reads $cfg->{configfile} -- these look inconsistent;
    # confirm against Hades::Config which key is authoritative.
    $cfg->{config} = $configfile;
    redraw_configuration();
#    print "$configname\n";
}

sub save {
    # Write the current configuration back to $configfile via the saver,
    # after copying the (possibly edited) defaults back into the config.
    #print Dumper($cfg,$config->{config}); DEBUG
    $cfg->{routes_default} = \%routes_default;
    $saver->save($configfile);
}
+
sub redraw_configuration {
    # Throw away and rebuild the "Configuration" notebook page with the
    # domain name, analyzer sleep time, and UDP port base editors.
    $table_configuration->destroy;
    $table_configuration = Gtk2::Table->new (2, 2, 0);

    my $domain_label = Gtk2::Label->new ("Measurement domain:");
    my $domain_entry = Gtk2::Entry->new_with_max_length(20);
    $domain_entry->set_text($cfg->{domain});
    $domain_entry->signal_connect("activate", \&domain_name_entry_action);
    $domain_entry->signal_connect("focus-out-event",
        \&domain_name_entry_action);
    $table_configuration->attach_defaults($domain_label, 0, 1, 0, 1);
    $table_configuration->attach_defaults($domain_entry, 1, 2, 0, 1);

    my $sleeptime_label = Gtk2::Label->new ("Analyzer sleep time / s:");
    my $sleeptime_spin = Gtk2::SpinButton->new(
        Gtk2::Adjustment->new($cfg->{sleeptime}, 0, 10000, 1, 10, 10), 1, 0);
    $sleeptime_spin->signal_connect("value-changed" => \&sleeptime_action);
    $table_configuration->attach_defaults($sleeptime_label, 0, 1, 1, 2);
    $table_configuration->attach_defaults($sleeptime_spin, 1, 2, 1, 2);

    my $portbase_label = Gtk2::Label->new ("Lowest UDP port:");
    my $portbase_spin = Gtk2::SpinButton->new(
        Gtk2::Adjustment->new($cfg->{portbase}, 1024, 65535, 1, 1000, 1000),
        1, 0);
    $portbase_spin->signal_connect("value-changed" => \&portbase_action);
    $table_configuration->attach_defaults($portbase_label, 0, 1, 2, 3);
    $table_configuration->attach_defaults($portbase_spin, 1, 2, 2, 3);

    # Empty filler cell so the table keeps its shape.
    my $filler = Gtk2::Label->new ("");
    $table_configuration->attach_defaults($filler, 2, 3, 3, 4);

    $notebook->append_page($table_configuration, "Configuration");
    $table_configuration->show_all;
}
+
sub domain_name_entry_action {
    # Store the edited measurement-domain name; return 0 so the
    # focus-out-event keeps propagating.
    my $entry = shift;
    $cfg->{domain} = $entry->get_text;
    return 0;
}

sub sleeptime_action {
    # Store the analyzer sleep time (seconds).
    my $spin = shift;
    $cfg->{sleeptime} = $spin->get_value();
}

sub portbase_action {
    # Store the lowest UDP port used for measurements.
    my $spin = shift;
    $cfg->{portbase} = $spin->get_value();
}
+
sub redraw_defaults {
    # Rebuild the "Defaults" notebook page: one editor widget per entry of
    # %routes_default, laid out one parameter per table row.
    $table_defaults->destroy;
    $table_defaults = Gtk2::Table->new (2, 2, 0);
    my $row = 0;    # table row counter (the original called this $column)

    my $spacer_top = Gtk2::Label->new ("");
    $table_defaults->attach_defaults($spacer_top, 2, 3, $row, $row + 1);
    $row++;

    my $packetsize_label = Gtk2::Label->new ("Packet Size:");
    my $packetsize_spin = Gtk2::SpinButton->new(
        Gtk2::Adjustment->new($routes_default{packetsize}, 40, 1500, 1, 10,
            10), 1, 0);
    $packetsize_spin->set_size_request($measurement_val_width,
        $measurement_val_height);
    $packetsize_spin->signal_connect(
        "value-changed" => \&def_packetsize_action);
    $table_defaults->attach_defaults($packetsize_label, 0, 1, $row, $row + 1);
    $table_defaults->attach_defaults($packetsize_spin, 1, 2, $row, $row + 1);
    $row++;

    my $interval_label = Gtk2::Label->new ("Interval:");
    my $interval_spin = Gtk2::SpinButton->new(
        Gtk2::Adjustment->new($routes_default{interval}, 0, 1e9, 1, 1000000,
            1000000), 1, 0);
    $interval_spin->set_size_request($measurement_val_width,
        $measurement_val_height);
    $interval_spin->signal_connect("value-changed" => \&def_interval_action);
    $table_defaults->attach_defaults($interval_label, 0, 1, $row, $row + 1);
    $table_defaults->attach_defaults($interval_spin, 1, 2, $row, $row + 1);
    $row++;

    my $groupsize_label = Gtk2::Label->new ("Groupsize:");
    my $groupsize_spin = Gtk2::SpinButton->new(
        Gtk2::Adjustment->new($routes_default{groupsize}, 1, 100, 1, 10, 10),
        1, 0);
    $groupsize_spin->set_size_request($measurement_val_width,
        $measurement_val_height);
    $groupsize_spin->signal_connect(
        "value-changed" => \&def_groupsize_action);
    $table_defaults->attach_defaults($groupsize_label, 0, 1, $row, $row + 1);
    $table_defaults->attach_defaults($groupsize_spin, 1, 2, $row, $row + 1);
    $row++;

    my $packetinterval_label = Gtk2::Label->new ("Packetinterval:");
    my $packetinterval_spin = Gtk2::SpinButton->new(
        Gtk2::Adjustment->new($routes_default{packetinterval}, 0, 1e6, 1,
            1000, 1000), 1, 0);
    $packetinterval_spin->set_size_request($measurement_val_width,
        $measurement_val_height);
    $packetinterval_spin->signal_connect(
        "value-changed" => \&def_packetinterval_action);
    $table_defaults->attach_defaults($packetinterval_label, 0, 1, $row,
        $row + 1);
    $table_defaults->attach_defaults($packetinterval_spin, 1, 2, $row,
        $row + 1);
    $row++;

    my $transmittime_label = Gtk2::Label->new ("Transmittime:");
    my $transmittime_spin = Gtk2::SpinButton->new(
        Gtk2::Adjustment->new($routes_default{transmittime}, 0, 1e6, 1, 1000,
            1000), 1, 0);
    $transmittime_spin->set_size_request($measurement_val_width,
        $measurement_val_height);
    $transmittime_spin->signal_connect(
        "value-changed" => \&def_transmittime_action);
    $table_defaults->attach_defaults($transmittime_label, 0, 1, $row,
        $row + 1);
    $table_defaults->attach_defaults($transmittime_spin, 1, 2, $row, $row + 1);
    $row++;

    my $precedence_label = Gtk2::Label->new("Precedence:");
    my $precedence_combo = Gtk2::Combo->new();
    $precedence_combo->set_size_request($measurement_val_width,
        $measurement_val_height);
    $precedence_combo->set_popdown_strings ("0x0","0xb8");
    $precedence_combo->entry->set_text($routes_default{precedence});
    $precedence_combo->entry->signal_connect(
        "activate" => \&def_precedence_action);
    $precedence_combo->entry->signal_connect(
        "focus-out-event" => \&def_precedence_action);
    $table_defaults->attach_defaults($precedence_label, 0, 1, $row, $row + 1);
    $table_defaults->attach_defaults($precedence_combo, 1, 2, $row, $row + 1);
    $row++;

    my $alert_label = Gtk2::Label->new("Alert:");
    my $alert_combo = Gtk2::Combo->new();
    $alert_combo->set_size_request($measurement_val_width,
        $measurement_val_height);
    $alert_combo->set_popdown_strings ("no", "yes");
    $alert_combo->entry->set_text($routes_default{alert} ? "yes" : "no");
    $alert_combo->entry->signal_connect("activate" => \&def_alert_action);
    $alert_combo->entry->signal_connect(
        "focus-out-event" => \&def_alert_action);
    $table_defaults->attach_defaults($alert_label, 0, 1, $row, $row + 1);
    $table_defaults->attach_defaults($alert_combo, 1, 2, $row, $row + 1);
    $row++;

    my $verbose_label = Gtk2::Label->new ("Verbose:");
    my $verbose_combo = Gtk2::Combo->new();
    $verbose_combo->set_size_request($measurement_val_width,
        $measurement_val_height);
    $verbose_combo->set_popdown_strings ("no", "yes");
    $verbose_combo->entry->set_text($routes_default{verbose} ? "yes" : "no");
    $verbose_combo->entry->signal_connect(
        "activate" => \&def_verbose_action);
    $verbose_combo->entry->signal_connect(
        "focus-out-event" => \&def_verbose_action);
    $table_defaults->attach_defaults($verbose_label, 0, 1, $row, $row + 1);
    $table_defaults->attach_defaults($verbose_combo, 1, 2, $row, $row + 1);
    $row++;

    my $spacer_bottom = Gtk2::Label->new ("");
    $table_defaults->attach_defaults($spacer_bottom, 2, 3, $row, $row + 1);

    $notebook->append_page($table_defaults, "Defaults");
    $table_defaults->show_all;
}
+
# --- Defaults-page setter callbacks ----------------------------------------
# Each callback stores the widget's current value in %routes_default; the
# combo entry handlers return 0 so the focus-out-event keeps propagating.

sub def_packetsize_action {
    my $spin = shift;
    $routes_default{packetsize} = $spin->get_value();
}

sub def_interval_action {
    my $spin = shift;
    $routes_default{interval} = $spin->get_value();
}

sub def_groupsize_action {
    my $spin = shift;
    $routes_default{groupsize} = $spin->get_value();
}

sub def_packetinterval_action {
    my $spin = shift;
    $routes_default{packetinterval} = $spin->get_value();
}

sub def_transmittime_action {
    my $spin = shift;
    $routes_default{transmittime} = $spin->get_value();
}

sub def_precedence_action {
    my $entry = shift;
    $routes_default{precedence} = $entry->get_text();
    return 0;
}

sub def_alert_action {
    # Stored as 1/0; the combo shows "yes"/"no".
    my $entry = shift;
    $routes_default{alert} = $entry->get_text() eq "yes" ? 1 : 0;
    return 0;
}

sub def_verbose_action {
    # Stored as 1/0; the combo shows "yes"/"no".
    my $entry = shift;
    $routes_default{verbose} = $entry->get_text() eq "yes" ? 1 : 0;
    return 0;
}
Property changes on: trunk/build/HADES/bin/hades-cfg-gui.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-cfg-test.pl
===================================================================
--- trunk/build/HADES/bin/hades-cfg-test.pl (rev
0)
+++ trunk/build/HADES/bin/hades-cfg-test.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,174 @@
#!/usr/bin/perl

# hades-cfg-test.pl -- load a HADES configuration file and dump its
# contents (scalar settings, hosts, interfaces, routes) for inspection.
# See embedded POD below for further information

# TODO
# - Add special treatment for map stuff
# - Add more error messages and checks

use strict;
use warnings;

use FindBin;
use lib "$FindBin::RealBin/../lib";

#DEBUG
use Data::Dumper;
#DEBUG

use Socket;
use Pod::Usage;

use Hades;


# create_config() (exported by Hades) fills %config from the config file
# and command-line options.
create_config(
) or pod2usage(2);

# Do not always use the config hash, instead set useful variables
my $verbose = $config{verbose};

# Problems found along the way; printed in one block at the end.
my @warnings = ();

# Plain scalar settings first; the nested structures get their own
# sections below.
foreach my $key ( sort keys %config ) {
    next if $key eq "hosts" or $key eq "routes" or $key eq "interfaces";
    print "$key = $config{$key}\n";
}

print "\n\n# HOSTS ########################################################\n";
my %hosts = get_hosts();
foreach my $host (sort keys %hosts) {
    print "\n$host\n";
    foreach my $key ( sort keys %{$hosts{$host}} ) {
        next if $key eq "hostid" or $key eq "interfaces";
        if ( $key eq "ssh_args" ) {
            print " $key =\n";
            foreach (sort keys %{$hosts{$host}->{$key}}) {
                print " $_ = $hosts{$host}->{$key}->{$_}\n";
            }
        } else {
            print " $key = " . $hosts{$host}->{$key} . "\n";
        }
    }
    # NOTE(review): $ip may be undef for misconfigured hosts; the guard
    # below was already disabled in the original.
    my $ip = ssh_ip($hosts{$host});
    #next unless defined $ip;
    print " ---------------------------\n";
    print " SSH-IP: $ip\n";
    if ($verbose) {
        # Fix: resolve only when the result is actually printed (the
        # lookup previously ran even with --noverbose) and reuse the
        # value instead of performing the DNS lookup twice.
        my $name = name($ip);
        if ($name) {
            print " DNS: " . $name . "\n";
        } else {
            print " Cannot resolve DNS name!\n";
            push @warnings, "Cannot resolve DNS name for $ip!";
        }
    }
}

print "\n\n# INTERFACES (extracted from HOSTS) #############################\n";
my %interfaces = get_interfaces();
foreach my $interface (sort keys %interfaces) {
    print "\n$interface (" . $interfaces{$interface}->{host}->{hostid} . ")\n";
    foreach my $key ( sort keys %{$interfaces{$interface}} ) {
        next if $key eq "host" || $key eq "ifid";
        print " $key = " . $interfaces{$interface}->{$key} . "\n";
    }
}

print "\n\n# ROUTES #######################################################\n";
my %routes = get_routes();
foreach my $senderif (sort keys %routes) {
    foreach my $receiverif (sort keys %{$routes{$senderif}}) {
        # A route may carry several measurements; print each one and check
        # that both endpoints are backed by a host.
        for (my $mid=0 ; $mid <= $#{$routes{$senderif}->{$receiverif}} ; $mid++)
        {
            print "\n$senderif -> $receiverif\n";
            foreach my $key (sort keys
                %{$routes{$senderif}->{$receiverif}->[$mid]}) {
                print " $key = " .
                    $routes{$senderif}->{$receiverif}->[$mid]->{$key} . "\n";
            }
            unless (defined $interfaces{$senderif}->{host}) {
                push @warnings,
                    "No host provides interface $senderif " .
                    "used as sender interface in \%routes!";
            }
            unless (defined $interfaces{$receiverif}->{host}) {
                push @warnings,
                    "No host provides interface $receiverif " .
                    "used as receiver interface in \%routes!";
            }
        }
    }
}

print "\n";

if (@warnings) {
    print "\n# WARNINGS #######################################################\n\n";
    $, = "\n";
    print @warnings;
}

exit 0;
+
+
+__END__
+
+
+
+=head1 NAME
+
+B<hades-cfg-test.pl> - Test configuration file
+
+=head1 SYNOPSIS
+
+B<hades-cfg-test.pl> S<[B<--help>]> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]>
+
+
+
+=head1 DESCRIPTION
+
+Loads the configuration file and prints out selected parts of the
+information it contains. Its main purpose is to test configuration files.
+
+
+
+=head1 OPTIONS
+
+Nearly all options have a built in default, that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information,
+e.g. the hostname related to the SSH IP as determined through DNS.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+
Property changes on: trunk/build/HADES/bin/hades-cfg-test.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-check-data-ippm-csv.pl
===================================================================
--- trunk/build/HADES/bin/hades-check-data-ippm-csv.pl
(rev 0)
+++ trunk/build/HADES/bin/hades-check-data-ippm-csv.pl 2011-03-18 13:05:36
UTC (rev 692)
@@ -0,0 +1,310 @@
+#!/usr/bin/perl
+
+#IMPORTANT
+# This script is based on hades-traceroute-anomalies.pl which is far more
+# sophisticated! Nevertheless this script is prepared to become as complex
+# as hades-traceroute-anomalies.pl ;)
+# In hades-data-traceroute.pl there are classes of anomalies. Do we need this
+# here also?
+
+
+#TODO
+# - error handling
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Pod::Usage;
+use DateTime;
+use DateTime::Format::HTTP;
+
+use Hades;
+use Hades::Data::Finder;
+
+create_config(
+ "from=s" => undef,
+ "to=s" => undef,
+ "timezone=s" => undef,
+ "sender=s" => undef,
+ "receiver=s" => undef,
+ "mid=i" => undef,
+ "filter=s" => undef,
+ "loss!" => 1,
+ "lossrate=f" => 0,
+ "showdata!" => 0,
+) or pod2usage(2);
+
+my $verbose = $config{verbose};
+my $debug = $config{debug};
+my $datadir = $config{datadir};
+my $wwwdir = $config{wwwdir};
+
+my $timezone = $config{timezone} ?
+ DateTime::TimeZone->new( name => $config{timezone} )
+ : DateTime::TimeZone->new( name => "local" );
+my $from = $config{from} ?
+ DateTime::Format::HTTP->parse_datetime($config{from},$timezone)
+ : DateTime->now(time_zone => $timezone)->truncate(to => 'day');
+my $to = $config{to} ?
+ DateTime::Format::HTTP->parse_datetime($config{to},$timezone)
+ : DateTime->now(time_zone => $timezone);
+my $sender = $config{sender};
+my $receiver = $config{receiver};
+my $mid = $config{mid};
+
+my (@data,$warnings);
+my $finder = Hades::Data::Finder->new();
+if ($#ARGV >= 0) {
+ while (my $file = shift) {
+ my $data = $finder->from_file($file)
+ or die "Error reading file:\n" . Dumper $finder->{warnings}->get(); #
TODO
+ $data->isa("Hades::Data::IPPM_Aggregated")
+ or die "Not an aggregated IPPM data file ($file)\n";
+ push @data, $data;
+ }
+} elsif ($#ARGV < 0) {
+ $finder->set_time($from,$to) or die "Invalid date format\n";
+ $finder->set_route($sender,$receiver); # set_route understands "undef"
+ $finder->set_type("ippm_aggregated");
+ $finder->set_mid($mid); # ignores "undef"
+ $finder->set_filter( { split /:/,$config{filter} } ) # filter with meta
data
+ if $config{filter};
+ @data = $finder->find;
+ unless (@data) {
+ print "No data found!\n";
+ exit 1;
+ }
+ #TODO Hades::Warnings ..... die join("\n", @$warnings)."\n" if @$warnings;
+}
+
+#
+# Display data
+#
+my $routes = 0;
+my $routes_anomalies = 0;
+foreach my $data (@data) {
+ $data->extract_data(); #TODO error handling.....
+ print Dumper $data->{warnings}->get(); # TODO
+ $data->calculate_statistics(); #TODO error handling.....
+ my $no_data = 0;
+ my $lossrate_high = 0;
+ my $duplicates = 0;
+ my $reordered = 0;
+ my $lossrate_percent = undef;
+ unless (ref($data->{data}) eq "ARRAY" && @{$data->{data}}) {
+ $no_data = 1 if $config{nodata};
+ } else {
+ $lossrate_percent = ($data->{statistics}->{lost_packets} /
+ $data->{statistics}->{total_packets}) * 100;
+ if ($config{loss}) {
+ if ($config{lossrate} == 0) {
+ # No rate limit => report every loss
+ if ($data->{statistics}->{lost_packets} != 0) {
+ $lossrate_high = 1;
+ }
+ } else {
+ # Use lossrate limit
+ if ($lossrate_percent >= $config{lossrate}) {
+ $lossrate_high = 1;
+ }
+ }
+ }
+ }
+ my @anomalies = ();
+ if ($lossrate_high) {
+ push @anomalies, "Too many lost packets (" .
+ "$data->{statistics}->{lost_packets} out of " .
+ "$data->{statistics}->{total_packets}, " .
+ sprintf("%.2f%%", $lossrate_percent) .
+ ")";
+ }
+ if (@anomalies) {
"$data->{sender},$data->{receiver},$data->{mid},$data->{statistics}->{lost_packets},$data->{statistics}->{total_packets},$lossrate_percent\n";
+ $routes_anomalies++;
+ }
+ $routes++;
+}
+
+
+exit 0;
+
+
+__END__
+
+
+=head1 NAME
+
+B<hades-check-data-ippm-csv.pl> - Find anomalies in IPPM data.
+
+=head1 SYNOPSIS
+
+B<hades-check-data-ippm-csv.pl> S<[B<--help>]>
+
+B<hades-check-data-ippm-csv.pl> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]>
+ S<[B<--from>=I<DATE>]> S<[B<--to>=I<DATE>]>
+ S<[B<--sender>=I<INTERFACE>]> S<[B<--receiver>=I<INTERFACE>]>
+ S<[B<--mid>=I<MID>]> S<[B<--filter>=I<NAME:VALUE:NAME:VALUE...>]>
+ S<[B<--[no]loss>]> S<[B<--lossrate>[=I<RATE>]>
+
+B<hades-check-data-ippm-csv.pl> S<[B<--[no]verbose>]>
+ S<[B<--[no]loss>]> S<[B<--lossrate>[=I<RATE>]>
+ F<DATAFILE>...
+
+
+=head1 DESCRIPTION
+
+TODO
+
+The script has two basic working modes:
+
+=over
+
+=item 1.
+
+By using various command line parameters you can specify meta data
+about the IPPM data you want to check for anomalies.
+
+=item 2.
+
+If one or more data files are specified on the command line, the script
tries
+to load the IPPM data to be checked from these files.
+
+=back
+
+
+
+=head1 OPTIONS
+
+Nearly all options have built-in defaults that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--from>=I<DATE>
+
+Set the start date for the data to be retrieved.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Please note that this format allows to explicitly specify the time zone.
+
+Default: today 00:00:00 (aka. beginning of today)
+
+
+=item B<--to>=I<DATE>
+
+Set the end date for the data to be retrieved.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Please note that this format allows to explicitly specify the time zone.
+
+Default: now
+
+
+=item B<--timezone>=I<TZNAME>
+
+The I<TZNAME> given is a "link" name in the Olson database. The time zone it
+represents is used as default time zone for the above parameters
+B<--from> and B<--to>.
+
+Default: local time zone
+
+
+=item B<--sender>=I<INTERFACE>
+
+Set sender interface.
+
+Default: none
+
+
+=item B<--receiver>=I<INTERFACE>
+
+Set receiver interface.
+
+Default: none
+
+
+=item B<--mid>=I<MID>
+
+Set MID (Measurement ID). Together with sender and receiver it forms a
+unique identifier of an IPPM measurement.
+
+Default: none
+
+
+=item B<--filter>=I<NAME:VALUE:NAME:VALUE...>
+
+Filter using meta data. Normally meta data is organized in key-value pairs.
+This parameter allows you to set the desired keys and values.
+
+Default: none
+
+
+=item B<--[no]loss>
+
+Report too many packet losses as an anomaly.
+
+See S<B<--lossrate>> below for specifying when a packet loss is too high.
+
+Default: enabled
+
+
+=item B<--lossrate>[=I<RATE>]
+
+If S<B<--loss>> is set, a loss anomaly is detected depending on the value you
+can specify here. B<RATE> is a percent value without the percentage sign (%).
+If you omit the value or set it to "0", every loss will be detected as
anomaly.
+This is the default!
+
+Default: 0
+
+
+=item B<--[no]showdata>
+
+This parameter eases your workflow by printing the showdata.pl command lines
+to use for viewing the listed IPPM data.
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+Find all anomalies for today (using default configuration):
+
+ hades-check-data-ippm-csv.pl
+
+Find anomalies in a data file:
+
+ hades-check-data-ippm-csv.pl Erlangen_Uni.Leipzig_Uni.qos_ai.dat
+
Property changes on: trunk/build/HADES/bin/hades-check-data-ippm-csv.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-check-data-ippm.pl
===================================================================
--- trunk/build/HADES/bin/hades-check-data-ippm.pl
(rev 0)
+++ trunk/build/HADES/bin/hades-check-data-ippm.pl 2011-03-18 13:05:36
UTC (rev 692)
@@ -0,0 +1,345 @@
+#!/usr/bin/perl
+
+#IMPORTANT
+# This script is based on hades-traceroute-anomalies.pl which is far more
+# sophisticated! Nevertheless this script is prepared to become as complex
+# as hades-traceroute-anomalies.pl ;)
+# In hades-data-traceroute.pl there are classes of anomalies. Do we need this
+# here also?
+
+
+#TODO
+# - error handling
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Pod::Usage;
+use DateTime;
+use DateTime::Format::HTTP;
+
+use Hades;
+use Hades::Data::Finder;
+
+create_config(
+ "from=s" => undef,
+ "to=s" => undef,
+ "timezone=s" => undef,
+ "sender=s" => undef,
+ "receiver=s" => undef,
+ "mid=i" => undef,
+ "filter=s" => undef,
+ "nodata!" => 1,
+ "loss!" => 1,
+ "lossrate=f" => 0,
+ "duplicates!" => 1,
+ "reordered!" => 1,
+ "showdata!" => 0,
+) or pod2usage(2);
+
+my $verbose = $config{verbose};
+my $debug = $config{debug};
+my $datadir = $config{datadir};
+my $wwwdir = $config{wwwdir};
+
+my $timezone = $config{timezone} ?
+ DateTime::TimeZone->new( name => $config{timezone} )
+ : DateTime::TimeZone->new( name => "local" );
+my $from = $config{from} ?
+ DateTime::Format::HTTP->parse_datetime($config{from},$timezone)
+ : DateTime->now(time_zone => $timezone)->truncate(to => 'day');
+my $to = $config{to} ?
+ DateTime::Format::HTTP->parse_datetime($config{to},$timezone)
+ : DateTime->now(time_zone => $timezone);
+my $sender = $config{sender};
+my $receiver = $config{receiver};
+my $mid = $config{mid};
+
+my (@data,$warnings);
+my $finder = Hades::Data::Finder->new();
+if ($#ARGV >= 0) {
+ while (my $file = shift) {
+ my $data = $finder->from_file($file)
+ or die "Error reading file:\n" . Dumper $finder->{warnings}->get(); #
TODO
+ $data->isa("Hades::Data::IPPM_Aggregated")
+ or die "Not an aggregated IPPM data file ($file)\n";
+ push @data, $data;
+ }
+} elsif ($#ARGV < 0) {
+ $finder->set_time($from,$to) or die "Invalid date format\n";
+ $finder->set_route($sender,$receiver); # set_route understands "undef"
+ $finder->set_type("ippm_aggregated");
+ $finder->set_mid($mid); # ignores "undef"
+ $finder->set_filter( { split /:/,$config{filter} } ) # filter with meta
data
+ if $config{filter};
+ @data = $finder->find;
+ unless (@data) {
+ print "No data found!\n";
+ exit 1;
+ }
+ #TODO Hades::Warnings ..... die join("\n", @$warnings)."\n" if @$warnings;
+}
+
+#
+# Display data
+#
+my $routes = 0;
+my $routes_anomalies = 0;
+foreach my $data (@data) {
+ $data->extract_data(); #TODO error handling.....
+ print Dumper $data->{warnings}->get(); # TODO
+ $data->calculate_statistics(); #TODO error handling.....
+ my $no_data = 0;
+ my $lossrate_high = 0;
+ my $duplicates = 0;
+ my $reordered = 0;
+ my $lossrate_percent = undef;
+ unless (ref($data->{data}) eq "ARRAY" && @{$data->{data}}) {
+ $no_data = 1 if $config{nodata};
+ } else {
+ $lossrate_percent = ($data->{statistics}->{lost_packets} /
+ $data->{statistics}->{total_packets}) * 100;
+ if ($config{loss}) {
+ if ($config{lossrate} == 0) {
+ # No rate limit => report every loss
+ if ($data->{statistics}->{lost_packets} != 0) {
+ $lossrate_high = 1;
+ }
+ } else {
+ # Use lossrate limit
+ if ($lossrate_percent >= $config{lossrate}) {
+ $lossrate_high = 1;
+ }
+ }
+ }
+ if ($config{duplicates} && $data->{statistics}->{duplicate_packets}) {
+ $duplicates = $data->{statistics}->{duplicate_packets};
+ }
+ if ($config{reordered}) {
+ my $foo = $data->{statistics}->{reordered_packets};
+ shift @{$foo}; # First value is empty, should be ALL not reordered
+ if (@{$foo}) {
+ $reordered = join " ", @{$foo};
+ }
+ }
+ }
+ my @anomalies = ();
+ if ($no_data) {
+ push @anomalies, "No data found";
+ }
+ if ($lossrate_high) {
+ push @anomalies, "Too many lost packets (" .
+ "$data->{statistics}->{lost_packets} out of " .
+ "$data->{statistics}->{total_packets}, " .
+ sprintf("%.2f%%", $lossrate_percent) .
+ ")";
+ }
+ if ($duplicates) {
+ push @anomalies, "$duplicates duplicate packet(s) occured";
+ }
+ if ($reordered) {
+ push @anomalies, "Reordered packet(s) occured: $reordered";
+ }
+ if (@anomalies) {
+ print "$data->{sender} -> $data->{receiver} ($data->{mid}):\n";
+ print " $config{bindir}/hades-show-data.pl \\\n" .
+ " --config=$config{configfile} \\\n" .
+ " --type=ippm_aggregated --mid=$data->{mid} \\\n" .
+ " --sender=$data->{sender} --receiver=$data->{receiver}\n"
+ if $config{showdata};
+ #TODO $data->{start_time}, $data->{end_time} ??
+ print join("\n", @anomalies);
+ print "\n\n";
+ $routes_anomalies++;
+ }
+ $routes++;
+}
+
+#if ($fatal || $curious || $strange) {
+ print "\nSummary: $routes_anomalies out of $routes (" .
+ sprintf("%.2f",($routes_anomalies/$routes)*100) .
+ "%) showed anomalies.\n";
+#}
+
+exit 0;
+
+
+__END__
+
+
+=head1 NAME
+
+B<hades-check-data-ippm.pl> - Find anomalies in IPPM data.
+
+=head1 SYNOPSIS
+
+B<hades-check-data-ippm.pl> S<[B<--help>]>
+
+B<hades-check-data-ippm.pl> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]>
+ S<[B<--from>=I<DATE>]> S<[B<--to>=I<DATE>]>
+ S<[B<--sender>=I<INTERFACE>]> S<[B<--receiver>=I<INTERFACE>]>
+ S<[B<--mid>=I<MID>]> S<[B<--filter>=I<NAME:VALUE:NAME:VALUE...>]>
+ S<[B<--[no]loss>]> S<[B<--lossrate>[=I<RATE>]>
+
+B<hades-check-data-ippm.pl> S<[B<--[no]verbose>]>
+ S<[B<--[no]loss>]> S<[B<--lossrate>[=I<RATE>]>
+ F<DATAFILE>...
+
+
+=head1 DESCRIPTION
+
+TODO
+
+The script has two basic working modes:
+
+=over
+
+=item 1.
+
+By using various command line parameters you can specify meta data
+about the IPPM data you want to check for anomalies.
+
+=item 2.
+
+If one or more data files are specified on the command line, the script
tries
+to load the IPPM data to be checked from these files.
+
+=back
+
+
+
+=head1 OPTIONS
+
+Nearly all options have built-in defaults that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--from>=I<DATE>
+
+Set the start date for the data to be retrieved.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Please note that this format allows to explicitly specify the time zone.
+
+Default: today 00:00:00 (aka. beginning of today)
+
+
+=item B<--to>=I<DATE>
+
+Set the end date for the data to be retrieved.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Please note that this format allows to explicitly specify the time zone.
+
+Default: now
+
+
+=item B<--timezone>=I<TZNAME>
+
+The I<TZNAME> given is a "link" name in the Olson database. The time zone it
+represents is used as default time zone for the above parameters
+B<--from> and B<--to>.
+
+Default: local time zone
+
+
+=item B<--sender>=I<INTERFACE>
+
+Set sender interface.
+
+Default: none
+
+
+=item B<--receiver>=I<INTERFACE>
+
+Set receiver interface.
+
+Default: none
+
+
+=item B<--mid>=I<MID>
+
+Set MID (Measurement ID). Together with sender and receiver it forms a
+unique identifier of an IPPM measurement.
+
+Default: none
+
+
+=item B<--filter>=I<NAME:VALUE:NAME:VALUE...>
+
+Filter using meta data. Normally meta data is organized in key-value pairs.
+This parameter allows you to set the desired keys and values.
+
+Default: none
+
+
+=item B<--[no]loss>
+
+Report too many packet losses as an anomaly.
+
+See S<B<--lossrate>> below for specifying when a packet loss is too high.
+
+Default: enabled
+
+
+=item B<--lossrate>[=I<RATE>]
+
+If S<B<--loss>> is set, a loss anomaly is detected depending on the value you
+can specify here. B<RATE> is a percent value without the percentage sign (%).
+If you omit the value or set it to "0", every loss will be detected as
anomaly.
+This is the default!
+
+Default: 0
+
+
+=item B<--[no]showdata>
+
+This parameter eases your workflow by printing the showdata.pl command lines
+to use for viewing the listed IPPM data.
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+Find all anomalies for today (using default configuration):
+
+ hades-check-data-ippm.pl
+
+Find anomalies in a data file:
+
+ hades-check-data-ippm.pl Erlangen_Uni.Leipzig_Uni.qos_ai.dat
+
Property changes on: trunk/build/HADES/bin/hades-check-data-ippm.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-check-data-tracert.pl
===================================================================
--- trunk/build/HADES/bin/hades-check-data-tracert.pl
(rev 0)
+++ trunk/build/HADES/bin/hades-check-data-tracert.pl 2011-03-18 13:05:36
UTC (rev 692)
@@ -0,0 +1,422 @@
+#!/usr/bin/perl
+
+#TODO
+# - error handling
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Pod::Usage;
+use DateTime;
+use DateTime::Format::HTTP;
+
+use Hades;
+use Hades::Data::Finder;
+
+create_config(
+ "from=s" => undef,
+ "to=s" => undef,
+ "timezone=s" => undef,
+ "sender=s" => undef,
+ "receiver=s" => undef,
+ "mid=i" => undef,
+ "filter=s" => undef,
+ "fatal!" => 2,
+ "dns!" => 2,
+ "curious!" => 2,
+ "strange!" => 2,
+ "showdata!" => 0,
+) or pod2usage(2);
+
+
+
+my ($fatal, $dns, $curious, $strange);
+if ($config{fatal} && $config{fatal} != 2) {
+ $fatal = 1; $dns = 0; $curious = 0; $strange = 0;
+} elsif ($config{dns} && $config{dns} != 2) {
+ $fatal = 0; $dns = 1; $curious = 0; $strange = 0;
+} elsif ($config{curious} && $config{curious} != 2) {
+ $fatal = 0; $dns = 0; $curious = 1; $strange = 0;
+} elsif ($config{strange} && $config{strange} != 2) {
+ $fatal = 0; $dns = 0; $curious = 0; $strange = 1;
+} else {
+ $fatal = $config{fatal};
+ $dns = $config{dns};
+ $curious = $config{curious};
+ $strange = $config{strange};
+}
+
+my $verbose = $config{verbose};
+my $debug = $config{debug};
+my $datadir = $config{datadir};
+my $wwwdir = $config{wwwdir};
+
+my $timezone = $config{timezone} ?
+ DateTime::TimeZone->new( name => $config{timezone} )
+ : DateTime::TimeZone->new( name => "local" );
+my $from = $config{from} ?
+ DateTime::Format::HTTP->parse_datetime($config{from},$timezone)
+ : DateTime->now(time_zone => $timezone)->truncate(to => 'day');
+my $to = $config{to} ?
+ DateTime::Format::HTTP->parse_datetime($config{to},$timezone)
+ : DateTime->now(time_zone => $timezone);
+my $sender = $config{sender};
+my $receiver = $config{receiver};
+my $mid = $config{mid};
+
+my (@data,$warnings);
+my $finder = Hades::Data::Finder->new();
+if ($#ARGV >= 0) {
+ while (my $file = shift) {
+ my $data = $finder->from_file($file)
+ or die "Error reading file:\n" . Dumper $finder->{warnings}->get(); #
TODO
+ $data->isa("Hades::Data::Traceroute")
+ or die "Not an aggregated traceroute data file ($file)\n";
+ push @data, $data;
+ }
+} elsif ($#ARGV < 0) {
+ $finder->set_time($from,$to) or die "Invalid date format\n";
+ $finder->set_route($sender,$receiver); # set_route understands "undef"
+ $finder->set_type("traceroute");
+ $finder->set_mid($mid); # ignores "undef"
+ $finder->set_filter( { split /:/,$config{filter} } ) # filter with meta
data
+ if $config{filter};
+ @data = $finder->find;
+ unless (@data) {
+ print "No data found!\n";
+ exit 1;
+ }
+ #TODO Hades::Warnings ..... die join("\n", @$warnings)."\n" if @$warnings;
+}
+
+#
+# Display data
+#
+my $routes = 0;
+my $routes_anomalies = 0;
+my %dns_problems = (); # DNS problems for every box
+ # (1 = sometimes failing ; 2 = always failing)
+foreach my $data (@data) {
+ $data->extract_data(); #TODO error handling.....
+ print Dumper $data->{warnings}->get(); # TODO
+ my $ip_ok = 0;
+ my $ip_unknown = 0;
+ my $name_ok = 0;
+ my $name_unknown = 0;
+ my $missing_hops = 0;
+ my $last_ok = 0;
+ my $last_unknown = 0;
+ for (my $i=0 ; $i <= $#{$data->{traceroutes}} ; $i++) {
+ if ( $data->{traceroutes}->[$i]->[-1]->{name} eq "UNKNOWN" &&
+ $data->{traceroutes}->[$i]->[-1]->{ip} eq "UNKNOWN" ) {
+ $last_unknown++;
+ } else {
+ $last_ok++;
+ }
+ for (my $j=0 ; $j <= $#{$data->{traceroutes}->[$i]} ; $j++) {
+ unless ( defined($data->{traceroutes}->[$i]->[$j]) ) {
+ $missing_hops++;
+ next;
+ }
+ if ($data->{traceroutes}->[$i]->[$j]->{name} eq "UNKNOWN") {
+ $name_unknown++;
+ } else {
+ $name_ok++;
+ }
+ if ($data->{traceroutes}->[$i]->[$j]->{ip} eq "UNKNOWN") {
+ $ip_unknown++;
+ } else {
+ $ip_ok++;
+ }
+ }
+ }
+ my $traceroutes_ok = 0;
+ my $traceroutes_error = 0;
+ for (my $i=0 ; $i <= $#{$data->{timeline}} ; $i++) {
+ if (defined $data->{timeline}->[$i]->{ref}) {
+ $traceroutes_ok++;
+ } elsif (defined $data->{timeline}->[$i]->{error}) {
+ $traceroutes_error++;
+ }
+ }
+ my $changes = $#{$data->{timeline}} - 1;
+ # Should be right that way (Last entry only end time!)
+ my @anomalies = ();
+ if ($traceroutes_ok == 0) {
+ push @anomalies, "No valid traceroute in data file" if $fatal;
+ } else {
+ if ($traceroutes_error/$traceroutes_ok > 1) {
+ push @anomalies, "Traceroute execution often failing" if $curious;
+ } elsif ($traceroutes_error) {
+ push @anomalies,
+ "Traceroute execution sometimes ($traceroutes_error) failing"
+ if $curious;
+ }
+ if ($missing_hops) {
+ push @anomalies, "STRANGE ERROR: Missing hops in data file" if
$strange;
+ }
+ if ($name_ok == 0 && $ip_ok == 0) {
+ push @anomalies, "Cannot reach any hop" if $fatal;
+ } else {
+ if ($ip_ok == 0) {
+ push @anomalies, "STRANGE ERROR: More DNS names than IP addresses"
+ if $strange;
+ } elsif ($ip_unknown/$ip_ok > 1 && $last_ok > 0) {
+ # "Target unreachable" leads automatically to "many hops unreachable"
+ push @anomalies, "Many hops unreachable" if $curious;
+ }
+ if ($name_ok == 0) { # $ip_ok =! 0 !!
+ if (!defined($dns_problems{$data->{sender}}) ||
+ $dns_problems{$data->{sender}} != 1) {
+ # Set to "always failing", if not already "sometimes failing"
+ $dns_problems{$data->{sender}} = 2;
+ }
+ } elsif (($name_unknown-$ip_unknown)/$name_ok > 1) {
+ $dns_problems{$data->{sender}} = 1;
+ }
+ if ($last_ok == 0) {
+ push @anomalies, "Target unreachable" if $fatal;
+ } elsif ($last_unknown/$last_ok > 1) {
+ push @anomalies, "Target often unreachable" if $curious;
+ }
+ }
+ if ($last_ok+$last_unknown > 10) { # Number of different routes
+ push @anomalies, "More than 10 different routes" if $curious;
+ #TODO might be because of DNS problems. Ignore it somehow?
+ }
+ if ($changes > 10) {
+ push @anomalies, "More than 10 routing changes" if $curious;
+ }
+ }
+ if (@anomalies) {
+ print "$data->{receiver} -> $data->{sender} ($data->{mid}):\n";
+ print " $config{bindir}/hades-show-data.pl \\\n" .
+ " --config=$config{configfile} \\\n" .
+ " --type=traceroute --mid=$data->{mid} \\\n" .
+ " --sender=$data->{sender} --receiver=$data->{receiver}\n"
+ if $config{showdata};
+ #TODO $data->{start_time}, $data->{end_time} ??
+ print join("\n", @anomalies);
+ print "\n\n";
+ $routes_anomalies++;
+ }
+ $routes++;
+}
+
+if ($fatal || $curious || $strange) {
+ print "\nSummary: $routes_anomalies out of $routes (" .
+ sprintf("%.2f",($routes_anomalies/$routes)*100) .
+ "%) showed anomalies.\n";
+}
+
+if ($dns) {
+ print "\nDNS check:\n";
+ foreach my $host (sort keys %dns_problems) {
+ print "$host: ";
+ if ($dns_problems{$host} == 2) {
+ print "No working DNS\n";
+ } else {
+ print "Name lookup often failing\n";
+ }
+ }
+}
+exit 0;
+
+
+__END__
+
+
+=head1 NAME
+
+B<hades-check-data-tracert.pl> - Find anomalies in traceroute data.
+
+=head1 SYNOPSIS
+
+B<hades-check-data-tracert.pl> S<[B<--help>]>
+
+B<hades-check-data-tracert.pl> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]>
+ S<[B<--from>=I<DATE>]> S<[B<--to>=I<DATE>]>
+ S<[B<--sender>=I<INTERFACE>]> S<[B<--receiver>=I<INTERFACE>]>
+ S<[B<--mid>=I<MID>]> S<[B<--filter>=I<NAME:VALUE:NAME:VALUE...>]>
+ S<[B<--[no]fatal>]> S<[B<--[no]curious>]> S<[B<--[no]strange>]>
+ S<[B<--[no]dns>]> S<[B<--[no]showdata>]>
+
+B<hades-check-data-tracert.pl> S<[B<--[no]verbose>]>
+ S<[B<--[no]fatal>]> S<[B<--[no]curious>]> S<[B<--[no]strange>]>
+ S<[B<--[no]dns>]> S<[B<--[no]showdata>]>
+ F<DATAFILE>...
+
+
+=head1 DESCRIPTION
+
+TODO
+
+The script has two basic working modes:
+
+=over
+
+=item 1.
+
+By using various command line parameters you can specify meta data
+about the traceroute data you want to check for anomalies.
+
+=item 2.
+
+If one or more data files are specified on the command line, the script
tries
+to load the traceroute data to be checked from these files.
+
+=back
+
+
+
+=head1 OPTIONS
+
+Nearly all options have built-in defaults that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--from>=I<DATE>
+
+Set the start date for the data to be retrieved.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Please note that this format allows to explicitly specify the time zone.
+
+Default: today 00:00:00 (aka. beginning of today)
+
+
+=item B<--to>=I<DATE>
+
+Set the end date for the data to be retrieved.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Please note that this format allows to explicitly specify the time zone.
+
+Default: now
+
+
+=item B<--timezone>=I<TZNAME>
+
+The I<TZNAME> given is a "link" name in the Olson database. The time zone it
+represents is used as default time zone for the above parameters
+B<--from> and B<--to>.
+
+Default: local time zone
+
+
+=item B<--sender>=I<INTERFACE>
+
+Set sender interface.
+
+Default: none
+
+
+=item B<--receiver>=I<INTERFACE>
+
+Set receiver interface.
+
+Default: none
+
+
+=item B<--mid>=I<MID>
+
+Set MID (Measurement ID). Together with sender and receiver it forms a
+unique identifier of a traceroute measurement.
+
+Default: none
+
+
+=item B<--filter>=I<NAME:VALUE:NAME:VALUE...>
+
+Filter using meta data. Normally meta data is organizied in key-value pairs.
+This parameter allows you to set the desired keys and values.
+
+Default: none
+
+
+=item B<--[no]fatal>
+
+This is a filter option. It can be used to limit the display of anomalies
+to I<fatal> anomalies by using S<B<--fatal>> as parameter. If S<B<--nofatal>>
+parameter is used, all data will be displayed I<except> the fatal data. You
+can suppress further types of anomalies by using more corresponding
+S<B<--no*>> filter parameters (see following parameters).
+
+I<Fatal> anomalies normally mean that the traceroute data is not usable.
+
+
+=item B<--[no]curious>
+
+This is a filter option. See S<B<--[no]fatal>> above.
+
+I<Curious> anomalies normally occur, when the traceroute data shows
significant
+problems, but should be usable.
+
+
+=item B<--[no]strange>
+
+This is a filter option. See S<B<--[no]fatal>> above.
+
+I<Strange> anomalies are anomalies that should never occur...
+
+
+=item B<--[no]dns>
+
+This is a filter option. See S<B<--[no]fatal>> above.
+
+I<DNS> anomalies are anomalies related to the DNS (Domain Name Service)
system.
+They are related to a box and can be quite reliably distinguished from the
+others and are therefore a special type of anomalies.
+
+
+=item B<--[no]showdata>
+
+This parameter eases your workflow by printing the hades-show-data.pl command
+lines to use for viewing the listed traceroute data.
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+Find all anomalies for today (using default configuration):
+
+ hades-check-data-tracert.pl
+
+Find anomalies in a data file:
+
+ hades-check-data-tracert.pl Erlangen_Uni.Leipzig_Uni.tracert.dat
+
Property changes on: trunk/build/HADES/bin/hades-check-data-tracert.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-check-data.pl
===================================================================
--- trunk/build/HADES/bin/hades-check-data.pl (rev
0)
+++ trunk/build/HADES/bin/hades-check-data.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,52 @@
+#!/usr/bin/perl
+
+use warnings;
+use strict;
+
+use Getopt::Long 2.32 qw(:config auto_help auto_version bundling);
+
+my $verbose = 0;
+my $repair = 0;
+
+GetOptions(
+ "verbose|v!" => \$verbose,
+ "repair|r!" => \$repair,
+) or pod2usage(2);
+
+foreach my $data (@ARGV) {
+ open DATA, $data or die "$!";
+ my @content = <DATA>;
+ my $corrupted = 0;
+ for (my $i=0 ; $i<=$#content ; $i++) {
+ my $line = $content[$i];
+ next unless $line =~ /\S+/;
+ next if $line =~ /^#/;
+ next if $line =~
/^\d+:\d{10}:-?\d{8,10}:\d{10}:-?\d{8,10}:\d+:[GN]:[nu]:[GN]:[nu]$/;
+ # Line is (most likely) corrupted
+ unless ($corrupted) {
+ # First damaged line
+ $corrupted = 1;
+ if ($verbose) {
+ print "\n\n$data:\n";
+ } else {
+ print "$data\n";
+ last unless $repair; # We can stop now!
+ }
+ }
+ print "$line" if $verbose; # print corrupted line
+ if ($repair) {
+ splice @content, $i, 1;
+ $i--; # Back one element
+ }
+ }
+ close DATA;
+ if ($repair) {
+ unlink $data;
+ open DATA, ">$data";
+ print DATA @content;
+ close DATA;
+ }
+}
+
+exit 0;
+
Property changes on: trunk/build/HADES/bin/hades-check-data.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-check-wrapper.pl
===================================================================
--- trunk/build/HADES/bin/hades-check-wrapper.pl
(rev 0)
+++ trunk/build/HADES/bin/hades-check-wrapper.pl 2011-03-18 13:05:36
UTC (rev 692)
@@ -0,0 +1,30 @@
+#!/usr/bin/perl
+
+
+use strict;
+use warnings;
+
+my $arg = "";
+while (my $next = shift @ARGV){
+ $arg = $arg . " $next";
+}
+
+my $checker = "/opt/hades/bin/hades-check-data-ippm-csv.pl $arg";
+
+my $now = time;
+my $date = $now - 172800;
+
+my @datearray = localtime ($date);
+
+my $year = $datearray[5]+1900;
+my $mon = sprintf ("%02d", $datearray[4]);
+$mon++;
+my $day = sprintf ("%02d", $datearray[3]);
+
+my $begin;
+my $end;
+
+$begin = "$year-$mon-$day 00:00:00";
+$end = "$year-$mon-$day 23:59:59";
+
+exec ("$checker --from='$begin' --to='$end'");
Property changes on: trunk/build/HADES/bin/hades-check-wrapper.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-cmd.pl
===================================================================
--- trunk/build/HADES/bin/hades-cmd.pl (rev 0)
+++ trunk/build/HADES/bin/hades-cmd.pl 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,204 @@
+#!/usr/bin/perl
+
+# hades-cmd.pl - run a shell command on every Hades measurement host,
+# either via the system ssh binary (mode "ssh") or via the Hades ssh
+# helper functions (mode "perl").
+# See embedded POD below for further information
+
+# TODO
+# - IMPORTANT: the ssh handling should of course use a Perl module!!!
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Socket;
+use Pod::Usage;
+
+use Hades;
+
+# Parse command line / configuration file options; create_config comes
+# from the Hades module and fills the global %config.
+create_config(
+ 'all' => 0,
+ 'user|sshuser=s' => undef,
+ 'interactive|sshinteractive!' => undef,
+ 'mode=s' => "ssh", # "perl" or "ssh"
+ 'forwardport' => undef
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $verbose = $config{verbose};
+my $forwardport = $config{forwardport};
+
+# Arguments handed to the Hades ssh helper functions (mode "perl").
+my %ssh_args = ();
+$ssh_args{user} = $config{user} if $config{user};
+if (!defined $config{interactive}) {
+ # Not set (undef) => Default
+} elsif ($config{interactive} == 0) {
+ # Explicitly turned off!
+ $ssh_args{interactive} = 0;
+} elsif ($config{interactive}) {
+ # Explicitly turned on
+ $ssh_args{interactive} = 1;
+}
+
+# Build the ssh command line used in mode "ssh".
+my $sshcommand = "ssh ";
+$sshcommand .= " -o \"User $ssh_args{user}\"" if $ssh_args{user};
+ # -o "User bla" more compatible than -l, which is e.g. not supported by scp
+# Reverse tunnel: remote port 8000 forwarded to local port 80.
+$sshcommand .= " -R 8000:127.0.0.1:80 " if $forwardport;
+if (exists $ssh_args{interactive}) {
+ if ($ssh_args{interactive}) {
+ $sshcommand .= " -o \"BatchMode no\"";
+ } else {
+ $sshcommand .= " -o \"BatchMode yes\"";
+ }
+}
+
+unless ($config{mode} eq "perl" || $config{mode} eq "ssh") {
+ warn "Unknown mode: $config{mode}\nExpected: perl|ssh\n\n";
+ pod2usage(2);
+}
+
+my %hosts = get_hosts();
+
+if ( $#ARGV < 0 ) {
+ warn "Tell me which command to execute\n\n";
+ pod2usage(2);
+}
+# All remaining command line arguments form the remote command.
+my $command = join " ", @ARGV;
+
+foreach my $host (sort keys %hosts) {
+ my $ip = ssh_ip($hosts{$host});
+ next unless defined $ip;
+ my $name = name($ip) || "NO DNS";
+ if ($config{all}) {
+ print "$host - $name - $ip\n";
+ } else {
+ # Interactive confirmation; any answer containing "n"/"N" skips the host.
+ print "$host - $name - $ip y/n? ";
+ my $input = <STDIN>;
+ next if $input =~ m/n/i;
+ }
+ print "$ip: $command\n" if $verbose;
+ if ($config{mode} eq "ssh") {
+ # -t forces a pseudo-tty so interactive remote commands work.
+ system "$sshcommand -t $ip \'$command\'";
+ } else {
+ my $result = ssh_cmd($hosts{$host}, \%ssh_args, $command);
+ if (!defined $result) {
+ warn ssh_err_msg();
+ } elsif ($result!=0) {
+ warn "error (remote command): " . $result . "\n"; #TODO error message
+ }
+ }
+ print "\n";
+}
+
+
+
+__END__
+
+
+
+=head1 NAME
+
+B<hades-cmd.pl> - TODO
+
+=head1 SYNOPSIS
+
+B<hades-cmd.pl> S<[B<--help>]> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]> S<[B<--[no]debug>]>
+ S<[B<--sshuser>=I<USER>]> S<[B<--[no]interactive>]>
+ S<[B<--forwardport>]>
+ S<[B<--all>]> COMMAND
+
+
+
+=head1 DESCRIPTION
+
+TODO
+
+
+
+=head1 OPTIONS
+
+Nearly all options have a built in default, that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--[no]debug>|B<-d>
+
+Print additional information.
+
+Configuration file: C<$debug>
+
+Default: disabled
+
+
+=item B<--sshuser>=I<USER>
+
+Use I<USER> as user for ssh.
+
+Configuration file: ssh_args
+
+Default: current user
+
+
+=item B<--[no]sshinteractive>
+
+Set SSH mode to interactive (e.g. password prompt!).
+See also L<Net::SSH::Perl>.
+
+Configuration file: none
+
+Default: enabled
+
+
+=item B<--all>
+
+Run the command on all hosts without asking.
+
+Configuration file: none
+
+Default: false
+
+
+=item TODO
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+> data2www.pl --date="23.4.2003"
+
+> data2www.pl --yesterday
+
+> data2www.pl --today
+
+
Property changes on: trunk/build/HADES/bin/hades-cmd.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-cp.pl
===================================================================
--- trunk/build/HADES/bin/hades-cp.pl (rev 0)
+++ trunk/build/HADES/bin/hades-cp.pl 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,198 @@
+#!/usr/bin/perl
+
+# hades-cp.pl - copy files to every Hades measurement host, either via
+# the system scp binary (mode "ssh") or via the Hades ssh helper
+# functions (mode "perl").
+# See embedded POD below for further information
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+use Socket;
+use Pod::Usage;
+
+use Hades;
+
+
+# Parse command line / configuration file options; create_config comes
+# from the Hades module and fills the global %config.
+create_config(
+ 'dir|d=s' => "/tmp",
+ 'all' => 0,
+ 'user|sshuser=s' => undef,
+ 'interactive|sshinteractive!' => undef,
+ 'mode=s' => "ssh", # "perl" or "ssh"
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $dir = $config{dir};
+
+# Arguments handed to the Hades ssh helper functions (mode "perl").
+my %ssh_args = ();
+$ssh_args{user} = $config{user} if $config{user};
+if (!defined $config{interactive}) {
+ # Not set (undef) => Default
+} elsif ($config{interactive} == 0) {
+ # Explicitly turned off!
+ $ssh_args{interactive} = 0;
+} elsif ($config{interactive}) {
+ # Explicitly turned on
+ $ssh_args{interactive} = 1;
+}
+
+# Build the scp command line used in mode "ssh".
+my $sshcommand = "scp ";
+$sshcommand .= " -o \"User $ssh_args{user}\"" if $ssh_args{user};
+ # -o "User bla" more compatible than -l, which is e.g. not supported by scp
+if (exists $ssh_args{interactive}) {
+ if ($ssh_args{interactive}) {
+ $sshcommand .= " -o \"BatchMode no\"";
+ } else {
+ $sshcommand .= " -o \"BatchMode yes\"";
+ }
+}
+
+unless ($config{mode} eq "perl" || $config{mode} eq "ssh") {
+ warn "Unknown mode: $config{mode}\nExpected: perl|ssh\n\n";
+ pod2usage(2);
+}
+
+my %hosts = get_hosts();
+
+if ( $#ARGV < 0 ) {
+ warn "Tell me which files to copy\n\n";
+ pod2usage(2);
+}
+# All remaining command line arguments are the files to copy.
+my @files = @ARGV;
+
+foreach my $host (sort keys %hosts) {
+ my $ip = ssh_ip($hosts{$host});
+ next unless defined $ip;
+ my $name = name($ip) || "NO DNS";
+ if ($config{all}) {
+ print "$host - $name - $ip\n";
+ } else {
+ # Interactive confirmation; any answer containing "n"/"N" skips the host.
+ print "$host - $name - $ip y/n? ";
+ my $input = <STDIN>;
+ next if $input =~ m/n/i;
+ }
+ if ($config{mode} eq "ssh") {
+ # Each file name is quoted individually to survive the shell.
+ system "$sshcommand \"" . join('" "',@files) . "\" ${ip}:${dir}";
+ } else {
+ ssh_cp($hosts{$host}, \%ssh_args, $dir, @files)
+ or warn ssh_err_msg();
+ }
+ print "\n";
+}
+
+
+__END__
+
+
+
+=head1 NAME
+
+B<hades-cp.pl> - TODO
+
+=head1 SYNOPSIS
+
+B<hades-cp.pl> S<[B<--help>]> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]> S<[B<--[no]debug>]>
+ S<[B<--sshuser>=I<USER>]> S<[B<--[no]interactive>]>
+ S<[B<--all>]> S<[B<--dir|-d>=F<DIR>]> FILES...
+
+
+
+=head1 DESCRIPTION
+
+TODO
+
+
+
+=head1 OPTIONS
+
+Nearly all options have a built in default, that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--[no]debug>|B<-d>
+
+Print additional information.
+
+Configuration file: C<$debug>
+
+Default: disabled
+
+
+=item B<--sshuser>=I<USER>
+
+Use I<USER> as user for ssh.
+
+Configuration file: ssh_args
+
+Default: current user
+
+
+=item B<--[no]sshinteractive>
+
+Set SSH mode to interactive (e.g. password prompt!).
+See also L<Net::SSH::Perl>.
+
+Configuration file: none
+
+Default: enabled
+
+
+=item B<--all>
+
+Copy all files without asking.
+
+Configuration file: none
+
+Default: false
+
+
+=item B<--dir|-d>=F<DIR>
+
+Set target directory.
+
+Configuration file: none
+
+Default: /tmp
+
+
+=item TODO
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+> data2www.pl --date="23.4.2003"
+
+> data2www.pl --yesterday
+
+> data2www.pl --today
+
+
Property changes on: trunk/build/HADES/bin/hades-cp.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-meta2db.pl
===================================================================
--- trunk/build/HADES/bin/hades-meta2db.pl (rev
0)
+++ trunk/build/HADES/bin/hades-meta2db.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,179 @@
+#!/usr/bin/perl
+
+# hades-meta2db.pl - read Hades meta data file paths from STDIN (one per
+# line) and import each file's Storable contents into the Hades database.
+
+use strict;
+use warnings;
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use English;
+#use Statistics::Descriptive;
+use Pod::Usage;
+use File::Basename;
+use YAML;
+use Storable qw(retrieve);
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+use Hades;
+use Hades::DB;
+
+create_config(
+ "dry-run!" => 0,
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+# NOTE(review): $domain is taken from $config{config} - presumably the
+# configuration/domain name; verify against Hades::create_config.
+# NOTE(review): option is declared as "dry-run" but read as {dry_run} -
+# confirm that create_config normalizes the key.
+my $domain = $config{config};
+my $verbose = $config{verbose};
+my $debug = $config{debug};
+my $dry_run = $config{dry_run};
+
+# No database connection is needed for a dry run.
+my $db = Hades::DB->new() unless $dry_run;
+
+# Read file names line by line instead of slurping all of STDIN into a
+# list first (the original "foreach (<STDIN>)" loaded everything at once).
+while (defined(my $file = <STDIN>)) {
+ chomp $file;
+ # Expected path layout: <domain>/www/YYYY/MM/DD/<name>.<mid>.info.dat
+ $file =~ m#([^/]+)/www/(\d\d\d\d)/(\d\d)/(\d\d)/[^/]+\.(\d+)\.info\.dat$#
+ or die "Invalid file path: $file\n";
+ my ($dom,$year,$month,$day,$mid) = ($1,$2,$3,$4,$5);
+ unless (
+ defined $dom && defined $year && defined $month && defined $day &&
+ defined $mid &&
+ $dom eq $domain
+ ) {
+ die "Invalid file path: $file\n";
+ }
+ print "Adding: $file\n" if $verbose;
+ my $info = retrieve($file);
+ ref($info) eq "HASH" or die "Corrupt meta data file $file\n";
+ unless ($dry_run) {
+ $db->file2db( info => $info,
+ year => $year, month => $month, day => $day, mid => $mid
+ ) or die "Error writing $file to database!\n";
+ }
+}
+
+
+exit 0;
+
+# Close the database connection even if we die somewhere above.
+END {
+ $db->close_db() if $db;
+}
+
+## END OF MAIN ##
+
+__END__
+
+
+
+=head1 NAME
+
+B<hades-meta2db.pl> - TODO
+
+=head1 SYNOPSIS
+
+B<hades-meta2db.pl> S<[B<--help>]> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]> S<[--[no]debug|-d]>
+ S<[B<--[no]new|-n>]>
+ S<[B<--sleeptime>=I<SEC>]>
+ S<B<--mday>=I<DD>> S<B<--month>=I<MM>> S<B<--year>=I<YYYY>>
+ S<[B<--[no]today|-n>]>
+ S<F<QoS data files ...>>
+
+
+
+=head1 DESCRIPTION
+
+TODO
+
+B<analyze_hosts.pl> fetches all .dat files of the given date from the
IPPM-PCs
+via scp and stores them in !!TODO!!/YYYY/MM/DD. Then it calls analyzer.pl for
+each of these files.
+
+
+
+=head1 OPTIONS
+
+Nearly all options have a built in default, that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+
+=item B<--[no]new>|B<-n>
+
+Already existing analyzer results will be discarded.
+
+Configuration file: n.a.
+
+Default: disabled
+
+
+=item B<--[no]debug>|B<-d>
+
+Print additional information.
+
+Configuration file: n.a.
+
+Default: disabled
+
+
+=item B<--wwwdir>=F<PATH>
+
+Use F<PATH> as output directory for generated data.
+
+Configuration file: C<$wwwdir>
+
+Default: F<[basedir]/www>
+
+
+=item B<--sleeptime>=I<SEC>
+
+Sleep I<SEC> seconds after every host.
+
+Configuration file: C<$sleeptime>
+
+Default: 0
+
+
+
+=item TODO
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+> data2www.pl --date="23.4.2003"
+
+> data2www.pl --yesterday
+
+> data2www.pl --today
+
+
+
Property changes on: trunk/build/HADES/bin/hades-meta2db.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-mkmap.pl
===================================================================
--- trunk/build/HADES/bin/hades-mkmap.pl (rev
0)
+++ trunk/build/HADES/bin/hades-mkmap.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,251 @@
+#!/usr/bin/perl
+
+# hades-mkmap.pl - generate weather-map style images for all maps that
+# are defined in the Hades configuration file.
+# See embedded POD below for further information
+
+#TODO
+# - This script is not working using Finder_SQL !!!
+#   Reason: It assumes mid == fileno
+#   Fix: Extract parameters from config => Use Finder_SQL using these params
+# ATTENTION: More than one measurement can exist!! Just use one... Which one?
+
+use warnings;
+use strict;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use DateTime;
+use DateTime::Format::HTTP;  # was missing: parse_datetime is called below
+use Pod::Usage;
+
+use Hades;
+use Hades::Map;
+use Hades::Map::Coords;
+use Hades::Map::Circle;
+use Hades::Map::Star;
+use Hades::Data::Finder;
+
+
+my $starttime = DateTime->now(); # For runtime calculation!
+
+create_config(
+ "from=s" => undef,
+ "to=s" => undef,
+ "time=s" => undef,
+ "duration=s" => undef, # in hours!!
+ "timezone=s" => undef,
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $debug = $config{debug};
+my $verbose = $config{verbose}; # Altlast...
+
+unless (
+ exists($config{maps}) && ref($config{maps}) eq 'ARRAY' && @{$config{maps}}
+) {
+ die "No maps defined in configuration file!\n";
+}
+
+# Time range selection: either --from/--to, or --time/--duration.
+my $timezone = $config{timezone} ?
+ DateTime::TimeZone->new( name => $config{timezone} )
+ : DateTime::TimeZone->new( name => "local" );
+my ($dt_begin,$dt_end);
+if ($config{from}) {
+ if ($config{duration} || $config{time}) {
+ pod2usage(
+ "You cannot use --duration or --time in conjunction with --from or --to"
+ );
+ }
+ $dt_begin = DateTime::Format::HTTP->parse_datetime($config{from},$timezone);
+ $dt_end = $config{to} ?
+ DateTime::Format::HTTP->parse_datetime($config{to},$timezone)
+ : DateTime->now(time_zone => $timezone);
+} elsif ($config{to}) {
+ pod2usage(
+ "You cannot use --to without --from"
+ );
+} else {
+ # Default: the last --duration hours (3 h if unset) up to --time (now).
+ $dt_end = $config{time} ?
+ DateTime::Format::HTTP->parse_datetime($config{time},$timezone)
+ : DateTime->now(time_zone => $timezone);
+ $dt_begin = $dt_end->clone->subtract(hours => ($config{duration} || 3));
+}
+
+# One Hades::Data::Finder for all:
+my $finder = Hades::Data::Finder->new();
+# Look at last 3 hours:
+$finder->set_time($dt_begin,$dt_end);
+# Use IPPM aggregated data, because it's fast to load:
+$finder->set_type("ippm_aggregated");
+## Use "first" measurement:
+#$self->{finder}->set_mid(0);
+
+STDOUT->autoflush(1);
+
+#
+# The great loop processing all maps from config file
+#
+# The caches are shared between all maps so data is loaded only once.
+my $cache_data = {};
+my $cache_median = {};
+my $cache_loss = {};
+foreach my $cfg (@{$config{maps}}) {
+ my $image = $cfg->{image} or die "Missing map image file name!\n";
+ print "Creating map $image ...\n" if $verbose;
+ my $type_string = $cfg->{type}
+ or die "No type specified for map \"$image\"!\n";
+ my $type_class = Hades::Map->string2type($type_string);
+ if ($type_class->isa("Hades::Map::Star")) {
+ # Star maps: one image per center interface; a "*" in the image and
+ # imagemap file names is replaced by the interface name.
+ print "Creating star maps:" if $verbose;
+ unless (ref($cfg->{interfaces}) eq "ARRAY" && @{$cfg->{interfaces}}) {
+ # No interfaces specified => use all
+ # NOTE(review): $config is used as an object here - verify that
+ # Hades exports a $config object besides the %config hash.
+ $cfg->{interfaces} = [ $config->ifs_sorted_by_shortname ];
+ }
+ foreach my $center (@{$cfg->{interfaces}}) {
+ print " $center" if $verbose;
+ (my $image = $cfg->{image}) =~ s/\*/$center/g;
+ (my $imagemap = $cfg->{imagemap}) =~ s/\*/$center/g;
+ my $map = $type_class->new(
+ center => $center,
+ map => {
+ %$cfg, image => $image, imagemap => $imagemap,
+ },
+ finder => $finder,
+ cache_data => $cache_data,
+ cache_median => $cache_median, cache_loss => $cache_loss,
+ );
+ $map->prepare_image;
+ $map->determine_routes;
+ $map->retrieving_data;
+ $map->draw_map;
+ $map->write_image;
+ $map->write_imagemap if $map->{imagemap};
+ }
+ print "\nAll star maps created\n" if $verbose;
+ } else {
+ my $map = $type_class->new(
+ map => $cfg, finder => $finder,
+ cache_data => $cache_data,
+ cache_median => $cache_median, cache_loss => $cache_loss,
+ );
+ $map->prepare_image;
+ $map->determine_routes;
+ print "Retrieving data ...\n" if $verbose;
+ $map->retrieving_data;
+ print "Generating map ...\n" if $verbose;
+ $map->draw_map;
+ print "Writing to image file $image ...\n" if $verbose;
+ $map->write_image;
+ if ($map->{imagemap}) {
+ print "Writing to image map file $map->{imagemap} ...\n" if $verbose;
+ $map->write_imagemap;
+ } else {
+ print "No image map file created\n" if $verbose;
+ }
+ }
+}
+
+
+#
+# Finished all maps
+#
+print "Completed! (Execution time: " .
+ $starttime->delta_ms(DateTime->now)->seconds() . " sec)\n" if $verbose;
+
+
+exit 0;
+
+
+## END OF MAIN ##
+
+
+
+__END__
+
+
+
+=head1 NAME
+
+B<hades-mkmap.pl> - TODO
+
+=head1 SYNOPSIS
+
+B<hades-mkmap.pl> S<[B<--help>]> S<[B<--config>=F<CONFIGFILE>]>
+ S<B<--[no]verbose>> S<B<--[no]debug>>
+
+
+
+=head1 DESCRIPTION
+
+TODO
+
+
+=head1 OPTIONS
+
+Nearly all options have a built in default, that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--[no]debug>
+
+Print additional debug information.
+
+Configuration file: C<$debug>
+
+Default: disabled
+
+
+=item B<--date>=I<DATE>
+
+Copy and analyze data of given DATE.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Of course, only day, month and year of the date are used to determine the
day.
+Overwrites S<B<--day>>, S<B<--month>>, and S<B<--year>>.
+
+Configuration file: n.a.
+
+Default: not set
+
+
+=item TODO
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+> data2www.pl --date="23.4.2003"
+
+> data2www.pl --yesterday
+
+> data2www.pl --today
+
+
Property changes on: trunk/build/HADES/bin/hades-mkmap.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-nagios-cfg-hosts.pl
===================================================================
--- trunk/build/HADES/bin/hades-nagios-cfg-hosts.pl
(rev 0)
+++ trunk/build/HADES/bin/hades-nagios-cfg-hosts.pl 2011-03-18 13:05:36
UTC (rev 692)
@@ -0,0 +1,151 @@
+#!/usr/bin/perl
+
+# hades-nagios-cfg-hosts.pl - generate one Nagios "host" object for every
+# host in the Hades configuration.
+# See embedded POD below for further information
+
+# TODO
+
+use warnings;
+use strict;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Pod::Usage;
+
+use Hades;
+
+create_config(
+ "output|o=s" => "-",
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $verbose = $config{verbose};
+my $domain = $config{domain};
+my $output = $config{output};
+
+# "-" means STDOUT (a 2-arg open of ">-" opens standard output).
+$output = "-" if !defined($output) || $output eq "";
+open OUTPUT, ">$output" or die "Cannot open $output for writing: $!\n";
+
+my %hosts = get_hosts();
+
+# One Nagios "host" object per configured host; further directives are
+# inherited from the hades-host-<DOMAIN> template (see POD).
+foreach my $hostname (sort keys %hosts) {
+ my $ip = ssh_ip($hosts{$hostname});
+ # NOTE(review): ssh_ip may return undef (other scripts in this commit
+ # guard with "next unless defined $ip") - confirm it cannot happen here.
+ print OUTPUT <<__EOF__;
+define host {
+ host_name $hostname
+ address $ip
+ use hades-host-$domain
+}
+
+__EOF__
+}
+
+exit 0;
+
+
+
+## END OF MAIN ##
+
+
+
+__END__
+
+
+
+=head1 NAME
+
+hades-nagios-cfg-hosts.pl - Generate host related Nagios configuration
+
+=head1 SYNOPSIS
+
+B<hades-nagios-cfg-hosts.pl> [OPTIONS]
+
+
+
+=head1 DESCRIPTION
+
+This script uses the information from the Hades configuration to create a
+corresponding Nagios configuration. By default the result is written to
+I<STDOUT>. This behaviour can be changed using the option S<B<--output>>.
+
+The script creates a Nagios C<host> object for every available host in the
+Hades configuration.
+The created Nagios C<host> objects use the S<C<use>> statement to inherit
+further configuration directives from the elsewhere defined
+I<hades-host-E<lt>DOMAINE<gt>>
+host object. You have to define this C<host> object yourself to include the
+configuration created by this script into Nagios. The S<I<E<lt>DOMAINE<gt>>>
+will be replaced by the actual domain name. This way you can configure each
+domain separately or you can e.g. create a base object for all your
+I<hades-host-E<lt>DOMAINE<gt>>
+objects to configure all domains in one place. See the Nagios documentation
for
+further information.
+
+B<TBD> Add simple examples here and/or include complex examples in an extra
directory.
+
+
+
+=head1 OPTIONS
+
+Besides the following special command line options, this script also accepts
+the typical Hades options. See L<hades-options(7)> to get information about
all
+available Hades command line options and their corresponding configuration
+file options, if available.
+
+Common Hades options especially important for this script: S<B<--config>>
+
+
+=over
+
+
+=item B<-o, --output>=F<OUTPUT>
+
+Output is written to file F<OUTPUT>. If F<OUTPUT> is "B<->", output is sent
to
+I<STDOUT>.
+
+Default: "B<->"
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+Write output to I<STDOUT>:
+
+ $ hades-nagios-cfg-hosts.pl
+
+or:
+
+ $ hades-nagios-cfg-hosts.pl --output=-
+
+or:
+
+ $ hades-nagios-cfg-hosts.pl -o -
+
+Write to some temporary file:
+
+ $ hades-nagios-cfg-hosts.pl --output=/tmp/hades-nagios-hosts.txt
+
+Or directly place the output (for domain "test") into a useful directory:
+
+ $ hades-nagios-cfg-hosts.pl \
+ --config=test --output=/etc/nagios/hades/hosts-test.cfg
+
+
+
+=head1 SEE ALSO
+
+L<hades-options(7)>, L<hades-nagios-cfg-routes(1)>
+
+
+
+=head1 AUTHORS
+
+Jochen Reinwand
+
Property changes on: trunk/build/HADES/bin/hades-nagios-cfg-hosts.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-nagios-cfg-routes.pl
===================================================================
--- trunk/build/HADES/bin/hades-nagios-cfg-routes.pl
(rev 0)
+++ trunk/build/HADES/bin/hades-nagios-cfg-routes.pl 2011-03-18 13:05:36
UTC (rev 692)
@@ -0,0 +1,181 @@
+#!/usr/bin/perl
+
+# hades-nagios-cfg-routes.pl - generate Nagios "host" objects for all
+# routes and "service" objects for their measurements from the Hades
+# configuration.
+# See embedded POD below for further information
+
+# TODO
+
+use warnings;
+use strict;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Pod::Usage;
+
+use Hades;
+
+create_config(
+ "output|o=s" => "-",
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $verbose = $config{verbose};
+my $domain = $config{domain};
+my $output = $config{output};
+
+# "-" means STDOUT (a 2-arg open of ">-" opens standard output).
+$output = "-" if !defined($output) || $output eq "";
+open OUTPUT, ">$output" or die "Cannot open $output for writing: $!\n";
+
+my %routes = get_routes();
+
+# $mids[$mid] collects the names of all routes carrying measurement $mid.
+my @mids = ();
+
+# One Nagios "host" object per route (sender.receiver); further
+# directives are inherited from the hades-route-<DOMAIN> template.
+foreach my $sid (sort keys %routes) {
+ foreach my $rid (sort keys %{$routes{$sid}}) {
+ my $routename = "$sid.$rid";
+ print OUTPUT <<__EOF__;
+define host {
+ host_name $routename
+ address 127.0.0.1
+ use hades-route-$domain
+}
+__EOF__
+ for (my $mid=0 ; $mid <= $#{$routes{$sid}->{$rid}} ; $mid++) {
+ next unless defined $routes{$sid}->{$rid}->[$mid]
+ && ref($routes{$sid}->{$rid}->[$mid]) eq "HASH";
+ if (defined $mids[$mid]) {
+ push @{$mids[$mid]}, $routename;
+ } else {
+ $mids[$mid] = [ $routename ];
+ }
+ }
+ }
+}
+
+# One Nagios "service" object per measurement id, attached to every route
+# that carries this measurement.
+for (my $mid=0 ; $mid <= $#mids ; $mid++) {
+ next unless defined $mids[$mid];
+ my $hosts = join ',', @{$mids[$mid]};
+ print OUTPUT <<__EOF__;
+define service {
+ name hades-ippm-$mid-check-$domain
+ service_description IPPM Measurement $mid
+ host_name $hosts
+ use hades-ippm-check-$domain
+}
+__EOF__
+}
+
+exit 0;
+
+
+
+## END OF MAIN ##
+
+
+
+__END__
+
+
+
+=head1 NAME
+
+hades-nagios-cfg-routes.pl - Generate measurement related Nagios config
+
+=head1 SYNOPSIS
+
+B<hades-nagios-cfg-routes.pl> [OPTIONS]
+
+
+
+=head1 DESCRIPTION
+
+This script uses the information from the Hades configuration to create a
+corresponding Nagios configuration. By default the result is written to
+I<STDOUT>. This behaviour can be changed using the option S<B<--output>>.
+
+The script creates a Nagios C<host> object for every available route that has
+measurements configured in the Hades configuration.
+The created Nagios C<host> objects use the S<C<use>> statement to inherit
+further configuration directives from the elsewhere defined
+I<hades-route-E<lt>DOMAINE<gt>>
+C<host> object. You have to define this C<host> object yourself to include
the
+configuration created by this script into Nagios. The S<I<E<lt>DOMAINE<gt>>>
+will be replaced by the actual domain name. This way you can configure each
+domain separately or you can e.g. create a base object for all your
+I<hades-route-E<lt>DOMAINE<gt>>
+objects to configure all domains in one place. See the Nagios documentation
for
+further information.
+
+There are also Nagios C<service> objects created that represent the
+measurements on the routes themselves. Similar to the C<host> objects for the
+routes, these C<service> objects inherit further configuration directives
from
+the elsewhere defined I<hades-ippm-check-E<lt>DOMAINE<gt>> C<service> object.
+
+B<TBD> Add simple examples here and/or include complex examples in an extra
directory.
+
+
+
+=head1 OPTIONS
+
+Besides the following special command line options, this script also accepts
+the typical Hades options. See L<hades-options(7)> to get information about
all
+available Hades command line options and their corresponding configuration
+file options, if available.
+
+Common Hades options especially important for this script: S<B<--config>>
+
+
+=over
+
+
+=item B<-o, --output>=F<OUTPUT>
+
+Output is written to file F<OUTPUT>. If F<OUTPUT> is "B<->", output is sent
to
+I<STDOUT>.
+
+Default: "B<->"
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+Write output to I<STDOUT>:
+
+ $ hades-nagios-cfg-routes.pl
+
+or:
+
+ $ hades-nagios-cfg-routes.pl --output=-
+
+or:
+
+ $ hades-nagios-cfg-routes.pl -o -
+
+Write to some temporary file:
+
+ $ hades-nagios-cfg-routes.pl --output=/tmp/hades-nagios-routes.txt
+
+Or directly place the output (for domain "test") into a useful directory:
+
+ $ hades-nagios-cfg-routes.pl \
+ --config=test --output=/etc/nagios/hades/routes-test.cfg
+
+
+
+=head1 SEE ALSO
+
+L<hades-options(7)>, L<hades-nagios-cfg-hosts(1)>
+
+
+
+=head1 AUTHORS
+
+Jochen Reinwand
+
Property changes on: trunk/build/HADES/bin/hades-nagios-cfg-routes.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-nagios-check.pl
===================================================================
--- trunk/build/HADES/bin/hades-nagios-check.pl (rev
0)
+++ trunk/build/HADES/bin/hades-nagios-check.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,291 @@
+#!/usr/bin/perl
+
+# hades-nagios-check.pl - check existence, validity, and freshness of the
+# raw and analyzed Hades data files for every configured measurement and
+# emit Nagios passive check results (see POD below).
+
+use warnings;
+use strict;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+use Pod::Usage;
+use DateTime;
+use Storable qw(retrieve);
+
+use Hades;
+
+
+create_config(
+ "maxage=i" => 7200,
+ "output|o=s" => "/var/spool/nagios/cmd/nagios.cmd",
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $datadir = $config{datadir};
+my $wwwdir = $config{wwwdir};
+my $max_age = $config{maxage};
+my $output = $config{output};
+
+# "-" means STDOUT (a 2-arg open of ">-" opens standard output).
+$output = "-" if !defined($output) || $output eq "";
+open OUTPUT, ">$output" or die "Cannot open $output for writing: $!\n";
+
+# Nagios plugin return codes.
+my %return_codes = (
+ OK => 0, WARNING => 1, CRITICAL => 2, UNKNOWN => 3
+);
+
+# Data files are stored below YYYY/MM/DD directories (today's date).
+my $fdate = DateTime->now->strftime("%Y/%m/%d");
+
+my %routes = get_routes();
+
+foreach my $sid (sort keys %routes) {
+ foreach my $rid (sort keys %{$routes{$sid}}) {
+ for (my $mid=0 ; $mid <= $#{$routes{$sid}->{$rid}} ; $mid++) {
+ # 1. The raw data file must exist, ...
+ my $datafile = "$datadir/$fdate/$sid.$rid.$mid.dat";
+ unless (-e $datafile) {
+ nagios_check(
+ $sid,$rid,$mid,"CRITICAL","Raw data file does not exist"
+ );
+ next;
+ }
+ # 2. ... be readable (only its last line is of interest), ...
+ unless (open DATAFILE, "/usr/bin/tail -n 1 $datafile |") {
+ nagios_check(
+ $sid,$rid,$mid,"CRITICAL","tail on raw data file failed: $!"
+ );
+ next;
+ }
+ my $line = <DATAFILE>;
+ close DATAFILE;
+ # 3. ... contain valid data, ...
+ unless ($line =~ m/^\d+:\d+:\d+:\d+:\d+:\d+/) {
+ nagios_check(
+ $sid,$rid,$mid,"CRITICAL","No valid data in raw data file"
+ );
+ next;
+ }
+ # 4. ... and its newest entry must be recent enough.
+ my ($seq, $s_sec, $s_msec, $r_sec, $r_msec, $psize) = split /:/, $line;
+ if ((time() - $s_sec) > $max_age) {
+ nagios_check(
+ $sid,$rid,$mid,"WARNING",
+ "No data newer than $max_age s in raw data file"
+ );
+ next;
+ }
+ # The same checks for the analyzed (Storable) data file:
+ my $infofile = "$wwwdir/$fdate/$sid.$rid.$mid.info.dat";
+ unless (-e $infofile) {
+ nagios_check(
+ $sid,$rid,$mid,"CRITICAL","Analyzed data file does not exist"
+ );
+ next;
+ }
+ my $info;
+ eval {
+ $info = retrieve($infofile);
+ };
+ if ($@) {
+ # Typo fixed: was "retreiving".
+ nagios_check(
+ $sid,$rid,$mid,"CRITICAL","Error retrieving analyzed data file: $@"
+ );
+ next;
+ }
+ unless (ref($info) eq "HASH") {
+ nagios_check(
+ $sid,$rid,$mid,"CRITICAL",
+ "Retrieving analyzed data file returned no data"
+ );
+ next;
+ }
+ if ((time() - $info->{time_last}) > $max_age) {
+ nagios_check(
+ $sid,$rid,$mid,"WARNING",
+ # Was hard-coded "7200 s"; report the configured limit instead.
+ "No data newer than $max_age s in analyzed data file"
+ );
+ next;
+ }
+ nagios_check($sid,$rid,$mid,"OK","Data newer than $max_age s");
+ }
+ }
+}
+
+exit 0;
+
+
+
+### END OF MAIN ###
+
+
+
+sub nagios_check {
+    # Write one Nagios passive check result line (external command
+    # PROCESS_SERVICE_CHECK_RESULT) to the OUTPUT filehandle.
+    # Args: sender host, receiver host, measurement id, status name
+    # (key of %return_codes), optional detail text. Always returns 1.
+    my ($src, $dst, $mid, $status, $text) = @_;
+    my $code = exists $return_codes{$status}
+        ? $return_codes{$status}
+        : "INTERNAL ERROR";
+    my $msg = "IPPM Measurement $mid $status";
+    $msg .= " - $text" if $text;
+    print OUTPUT "[" . time . "] PROCESS_SERVICE_CHECK_RESULT;"
+        . "$src.$dst;IPPM Measurement $mid;"
+        . "$code;$msg\n";
+    return 1;
+}
+
+
+
+__END__
+
+
+
+=head1 NAME
+
+hades-nagios-check.pl - Monitor Hades measurements using Nagios
+
+=head1 SYNOPSIS
+
+B<hades-nagios-check.pl> [OPTIONS]
+
+
+
+=head1 DESCRIPTION
+
+This script determines the status of all configured Hades measurements of
+one domain and generates an output compatible with Nagios' passive checks
+mechanism. By default the result is written to
+I<STDOUT>. This behaviour can be changed using the option S<B<--output>>.
+
+B<IMPORTANT>: This script expects that Nagios has the corresponding
+measurements in its configuration. You can do this by using the script
+L<hades-nagios-cfg-routes(1)>.
+
+The following tests are done for every measurement that is configured
+in the configuration:
+
+
+=over
+
+
+=item *
+
+Does the I<raw> data file exist?
+
+
+=item *
+
+Can the I<raw> data file be opened?
+
+
+=item *
+
+Is there valid data in the I<raw> data file?
+
+
+=item *
+
+Is the data in the I<raw> data file up-to-date? See option S<B<--maxage>>.
+
+
+=item *
+
+Does the I<analyzed> data file exist?
+
+
+=item *
+
+Can the I<analyzed> data file be opened?
+
+
+=item *
+
+Is the data in the I<analyzed> data file up-to-date? See option
S<B<--maxage>>.
+
+
+=back
+
+This script should be started by a cron daemon, e.g. every 10 minutes.
+Furthermore it is useful to configure the checks in Nagios to be considered
+CRITICAL or WARNING if there is no status information retrieved for some
time.
+
+An example Nagios configuration:
+
+ define service {
+ name hades-ippm-check
+ service_description Hades measurements check
+ passive_checks_enabled 1
+ active_checks_enabled 0
+ check_freshness 1
+ # Make sure that passive results are up to date.
+ freshness_threshold 3600
+ # Results should not be older than an hour (value in s).
+ check_command hades-no-route-check
+ # This command is run only if the service results are "stale"
+ check_period 24x7
+ max_check_attempts 1
+ # Not used, but nagios will complain if missing
+ register 0
+ }
+
+
+
+
+=head1 OPTIONS
+
+Besides the following special command line options, this script also accepts
+the typical Hades options. See L<hades-options(7)> to get information about
all
+available Hades command line options and their corresponding configuration
+file options, if available.
+
+Common Hades options especially important for this script: S<B<--config>>
+
+
+=over
+
+
+=item B<-o, --output>=F<OUTPUT>
+
+Output is written to file F<OUTPUT>. If F<OUTPUT> is "B<->", output is sent
to
+I<STDOUT>.
+
+Default: F</var/spool/nagios/cmd/nagios.cmd>
+
+
+=item B<--maxage>=I<MAXAGE>
+
+The time in seconds after which the data in the data files is considered to
be
+too old.
+
+Default: 7200 (2 hours)
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+Write output to I<STDOUT>:
+
+ $ hades-nagios-cfg-hosts.pl --output=-
+
+or:
+
+ $ hades-nagios-cfg-hosts.pl -o -
+
+Write to some temporary file:
+
+ $ hades-nagios-cfg-hosts.pl --output=/tmp/hades-checks.txt
+
+Or directly place the output (for domain "test") into a Nagios command fifo
+(named pipe):
+
+ $ hades-nagios-cfg-hosts.pl \
+ --config=test --output=/var/spool/nagios/cmd/nagios.cmd
+
+
+
+=head1 SEE ALSO
+
+L<hades-options(7)>, L<hades-nagios-cfg-routes(1)>,
+L<hades-nagios-cfg-hosts(1)>,
+L<http://nagios.sourceforge.net/docs/3_0/passivechecks.html>
+
+
+
+=head1 AUTHORS
+
+Jochen Reinwand
+
Property changes on: trunk/build/HADES/bin/hades-nagios-check.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-plot.pl
===================================================================
--- trunk/build/HADES/bin/hades-plot.pl (rev 0)
+++ trunk/build/HADES/bin/hades-plot.pl 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,220 @@
+#!/usr/bin/perl
+
+#TODO
+# - Usage of MID is a bit ugly, because it's only used for IPPM data
+# - Fill POD!! Take hades-show-data.pl as template!
+
+use warnings;
+use strict;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#/DEBUG
+
+use Pod::Usage;
+use DateTime;
+use DateTime::Format::HTTP;
+
+use Hades;
+use Hades::Data::Finder;
+use Hades::Data::Finder_SQL;
+use Hades::Plot qw(:DEFAULT @output_formats &determine_output_formats);
+
+create_config(
+ "from=s" => undef,
+ "to=s" => undef,
+ "timezone=s" => undef,
+ "sender=s" => undef,
+ "receiver=s" => undef,
+ "mid=i" => undef,
+ ################################
+ "plots=s" => "owd,owdv,loss,tracert",
+ "outfile|o=s" => undef,
+ "outformat=s" => undef,
+ "width|w=s" => 800,
+ "height|h=s" => 1000,
+ "title=s" => "Unknown",
+ "plscript=s" => undef,
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $verbose = $config{verbose};
+my $debug = $config{debug};
+
+my $outfile = $config{outfile};
+my $outformat = $config{outformat};
+unless ($outfile) {
+ pod2usage("Please set output filename using --outfile!\n");
+}
+determine_output_formats;
+if ( !$outformat ) {
+ my $outmatch = join '|', @output_formats;
+ if ($outfile =~ /\.($outmatch)$/i ) {
+ $outformat = lc $1;
+ } else {
+ die "Could not determine output format from file name! " .
+ "Set it with \"--outtype\"\n";
+ }
+} else {
+ $outformat = lc $outformat;
+ die "Unknown output format: $outformat\n"
+ unless grep($outformat eq $_, @output_formats);
+}
+
+
+my $timezone = DateTime::TimeZone->new(
+ name => ($config{timezone} ? $config{timezone} : 'local')
+);
+my $from = $config{from} ?
+ DateTime::Format::HTTP->parse_datetime($config{from},$timezone) : undef;
+my $to = $config{to} ?
+ DateTime::Format::HTTP->parse_datetime($config{to},$timezone) : undef;
+
+
+my @plots = ();
+my %data_objects = ();
+if ($#ARGV == 0) {
+ my $file = shift;
+ my $finder = Hades::Data::Finder->new();
+ my $data = $finder->from_file($file)
+ or die "Error reading file:\n" . Dumper $finder->{warnings}->get(); #
TODO
+ my $data_type = ref $data;
+ $data_objects{$data_type} = $data;
+ if ($data_type eq "Hades::Data::Traceroute") {
+ @plots = ( { type => "tracert", id => 0 } );
+ } elsif ($data_type eq "Hades::Data::BWCTL") {
+ @plots = ( { type => "bwctl", id => 0 } );
+ } elsif ($data_type eq "Hades::Data::IPPM_Aggregated") {
+ @plots = (
+ { type => "owd", id => 0 },
+ { type => "owdv", id => 1 },
+ { type => "loss", id => 2 },
+ #{ type => "lossbars", id => 0 } #TODO How to enable? config{XXX}?
+ );
+ } elsif ($data_type eq "Hades::Data::IPPM_Raw") {
+ # Raw -> Sanitized -> rawowd rawowdv
+ $data_objects{"Hades::Data::IPPM_Sanitized"} =
+ Hades::Data::IPPM_Sanitized->from_raw($data);
+ $data_objects{"Hades::Data::IPPM_Sanitized"}->extract_data
+ or die "Internal: Cannot sanitize data"; #TODO
+ @plots = (
+ { type => "rawowd", id => 0 },
+ { type => "rawowdv", id => 1 }
+ );
+ }
+ # start and end time are in Epoch and the Plot module knows how to deal
with
+ # it. This way we also don't have to deal with $timezone for this.
+ $from ||= $data->{start_time};
+ $to ||= $data->{end_time};
+} elsif ($#ARGV < 0) {
+ $from ||= DateTime->now(time_zone => $timezone)->truncate(to => 'day');
+ $to ||= DateTime->now(time_zone => $timezone);
+
+ my $sender = $config{sender};
+ my $receiver = $config{receiver};
+ my $mid = $config{mid};
+ unless ($sender && $receiver) {
+ pod2usage("You have to set at least --sender and --receiver!\n");
+ }
+
+ my $count = 0;
+ foreach my $type (split /\s*,\s*/, $config{plots}) {
+ unless (valid_plot($type)) {
+ die "Invalid plot type: $type";
+ }
+ my %plot = ( type => $type, id => $count );
+ $data_objects{plot_source($type)} = [];
+ # only note that data object has to be created for later, see below
+ push @plots, \%plot;
+ $count++;
+ }
+ unless (@plots) {
+ die "No valid plots found!\n";
+ }
+
+ my $finder = Hades::Data::Finder_SQL->new();
+ $finder->set_time($from,$to) or die "Invalid date format\n";
+ $finder->set_route($sender,$receiver);
+ $finder->set_mid($mid); # ignores "undef"
+ my @results = $finder->find;
+ foreach my $m (@results) {
+ my $data_type = ref $m;
+ next unless exists $data_objects{$data_type};
+ push @{$data_objects{$data_type}}, $m;
+ }
+ unless (%data_objects) {
+ die "No data found!\n";
+ }
+ foreach my $data_type (keys %data_objects) {
+ if ($#{$data_objects{$data_type}} == 0) {
+ # One data object => use it!
+ $data_objects{$data_type} = $data_objects{$data_type}->[0];
+ $data_objects{$data_type}->extract_data;
+ } elsif ($#{$data_objects{$data_type}} < 0) {
+ # No data object!
+ die "No data found for data type $data_type!\n";
+ } else { # $#{$data_objects{$data_type}} > 0
+ # More than one data object
+ my $message = "More than one measurement for data type $data_type:\n";
+ my @sorted =
+ sort { $a->{mid} <=> $b->{mid} } @{$data_objects{$data_type}};
+ foreach my $m (@sorted) {
+ $m->extract_meta;
+ $message .= "$m->{mid}:\n";
+ foreach my $key (sort keys %{$m->{meta}} ) {
+ $message .= " $key = $m->{meta}->{$key}\n";
+ }
+ }
+ die $message;
+ }
+ }
+} elsif ($#ARGV > 0) {
+ pod2usage("Only one data file on command line allowed!\n");
+}
+
+print "Generating $outfile ($outformat)\n";
+
+$Hades::Plot::plscript = $config{plscript};
+
+plot(
+ outfile => $outfile,
+ outformat => $outformat,
+ plots =>
\@plots,
+ data_objects => \%data_objects,
+ date_begin => $from,
+ date_end => $to,
+ width => $config{width},
+ height => $config{height},
+ title => $config{title},
+ clickmap => 0,
+);
+
+__END__
+
+
+=head1 NAME
+
+B<hades-plot.pl>
+
+=head1 SYNOPSIS
+
+B<hades-plot.pl> DATAFILE...
+
+
+=head1 EXAMPLES
+
+Direct data file access:
+hades-plot.pl --config=geant
/data/hades/geant/www/2007/08/25/Amsterdam_GEANT.Frankfurt_GEANT.0.info.dat
--outfile=out.png
+
+Using flexible search interface:
+hades-plot.pl --config=geant --from=2007-08-07T00:00
--to=2007-08-08T00:00:00 --sender=Amsterdam_GEANT
--receiver=Amsterdam_SURFnet --mid=0 --outfile=out.png --timezone=UTC
+
+Same plot via CGI:
+http://www.win-labor.dfn.de/cgi-bin/hades/display.pl?config=geant;date=2007-08-07;route=Amsterdam_GEANT.Amsterdam_SURFnet.0.owd_owdv_loss_tracert
+
+Plot only selected plot types:
+hades-plot.pl --config=geant --from=2007-08-07T00:00
--to=2007-08-08T00:00:00 --sender=Amsterdam_GEANT
--receiver=Amsterdam_SURFnet --mid=0 --outfile=out.png --timezone=UTC
--plot=owd,loss
+
Property changes on: trunk/build/HADES/bin/hades-plot.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-show-data.pl
===================================================================
--- trunk/build/HADES/bin/hades-show-data.pl (rev
0)
+++ trunk/build/HADES/bin/hades-show-data.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,587 @@
+#!/usr/bin/perl
+
+#TODO
+# - add more data types...
+# - use other parameters as filters, when data is loaded directly from file?
+# - error handling
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Pod::Usage;
+use DateTime;
+use DateTime::Format::HTTP;
+
+use Hades;
+use Hades::Data::Finder;
+use Hades::Data::Finder_SQL;
+
+create_config(
+ "from=s" => undef,
+ "to=s" => undef,
+ "timezone=s" => undef,
+ "sender=s" => undef,
+ "receiver=s" => undef,
+ "type=s" => undef,
+ "mid=i" => undef,
+ "filter=s" => undef,
+ ################################
+ "meta!" => 2, # see below
+ "statistics!" => 1,
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $verbose = $config{verbose};
+my $debug = $config{debug};
+
+my $timezone = $config{timezone} ?
+ DateTime::TimeZone->new( name => $config{timezone} )
+ : DateTime::TimeZone->new( name => "local" );
+my $from = $config{from} ?
+ DateTime::Format::HTTP->parse_datetime($config{from},$timezone)
+ : DateTime->now(time_zone => $timezone)->truncate(to => 'day');
+my $to = $config{to} ?
+ DateTime::Format::HTTP->parse_datetime($config{to},$timezone)
+ : DateTime->now(time_zone => $timezone);
+my $sender = $config{sender};
+my $receiver = $config{receiver};
+my $type = $config{type};
+# Be nice and fail early on unsupported types:
+if (defined $type) {
+ unless (Hades::Data->is_type($type) || Hades::Data->is_string($type)
+ || $type eq "ippm_aggregated_from_raw") {
+ die "Unknown data type: $type\n";
+ }
+}
+my $mid = $config{mid};
+
+my $statistics = $config{statistics};
+my $meta = $config{meta};
+ # 2 => display data AND meta data
+ # 1 => display only meta data
+ # 0 => display only data
+
+my ($data,$warnings);
+my $type_orig = "";
+if ($#ARGV == 0) {
+ my $file = shift;
+ my $finder = Hades::Data::Finder->new();
+ $data = $finder->from_file($file)
+ or die "Error reading file:\n" . Dumper $finder->{warnings}->get(); #
TODO
+} elsif ($#ARGV < 0) {
+ my $finder = Hades::Data::Finder_SQL->new();
+ $finder->set_time($from,$to) or die "Invalid date format\n";
+ $finder->set_route($sender,$receiver); # set_route understands "undef"
+ if (defined $type && $type eq "ippm_aggregated_from_raw") {
+ $type = "ippm_raw";
+ $type_orig = "ippm_aggregated_from_raw";
+ }
+ $finder->set_type($type); # ignores "undef"
+ $finder->set_mid($mid); # ignores "undef"
+ $finder->set_filter( { split /:/,$config{filter} } ) # filter with meta
data
+ if $config{filter};
+ my @results = $finder->find;
+ if ($#results == 0) {
+ $data = $results[0];
+ # Display later
+ } elsif ($#results > 0) {
+ print "Available measurements:\n";
+ my %results = ();
+ foreach (@results) {
+
$results{$_->{sender}}->{$_->{receiver}}->{$_->type2string()}->{$_->{mid}} =
$_;
+ }
+ #TODO more useful listing!!!???
+ foreach my $sender (sort keys %results) {
+ foreach my $receiver (sort keys %{$results{$sender}}) {
+ foreach my $type (sort keys %{$results{$sender}->{$receiver}}) {
+ foreach my $mid (
+ sort {$a <=> $b} keys %{$results{$sender}->{$receiver}->{$type}}
+ ) {
+ my $meta =
$results{$sender}->{$receiver}->{$type}->{$mid}->{meta};
+ print "$sender $receiver $type $mid\n";
+ foreach my $key (sort keys %{$meta} ) {
+ print " $key = $meta->{$key}\n";
+ }
+ }
+ }
+ }
+ }
+ # Old, not very flexible, but very Perlish:
+ #map { print; } sort map "$_->{sender} $_->{receiver} " .
+ # $_->type2string() .
+ # " $_->{mid}\n",
+ # @results;
+ exit 0;
+ } else {
+ print "No data found!\n";
+ exit 1;
+ }
+ #TODO Hades::Warnings ..... die join("\n", @$warnings)."\n" if @$warnings;
+} elsif ($#ARGV > 0) {
+ pod2usage("Only one data file on command line allowed!\n");
+}
+
+#
+# Now handle requested "aggregated" data types:
+#
+if ($type_orig eq "ippm_aggregated_from_raw") {
+ $data = Hades::Data::IPPM_Aggregated->new(
+ sender => $data->{sender},
+ receiver => $data->{receiver},
+ mid => $data->{mid},
+ start_time => $data->{start_time},
+ end_time => $data->{end_time},
+ ######################################
+ warnings => $data->{warnings},
+ get_data_object => Hades::Data::IPPM_Sanitized->new(
+ sender => $data->{sender},
+ receiver => $data->{receiver},
+ mid => $data->{mid},
+ start_time => $data->{start_time},
+ end_time => $data->{end_time},
+ ######################################
+ warnings => $data->{warnings},
+ get_data_object => $data,
+ get_data_options => undef,
+ ),
+ get_data_options => undef,
+ );
+}
+
+#
+# Display meta data
+#
+$data->extract_meta(); #TODO error handling.....
+print Dumper $data->{warnings}->get(); # TODO
+if ($meta != 0) {
+ print "Meta data:\n";
+ if ( $data->isa("Hades::Data::IPPM_Aggregated") ||
+ $data->isa("Hades::Data::IPPM_Sanitized") ||
+ $data->isa("Hades::Data::IPPM_Raw") ||
+ $data->isa("Hades::Data::Traceroute") ||
+ $data->isa("Hades::Data::BWCTL") ) {
+ my %meta = %{$data->{meta}};
+ foreach my $key (sort {$a cmp $b} keys %meta) {
+ my $value = $meta{$key};
+ chomp $value if $value;
+ printf " %-18s: %s\n", $key, $value || "<UNDEF>";
+ }
+ } else {
+ die "\nUnkown data object type\n";
+ }
+ print "\n";
+}
+exit 0 if $meta == 1;
+
+#
+# Display data
+#
+$data->extract_data(); #TODO error handling.....
+print Dumper $data->{warnings}->get(); # TODO
+if ( $data->isa("Hades::Data::IPPM_Aggregated") ) {
+ printf "%-13s %-12s %-12s %-12s %-5s Time:\n",
+ "Time stamp:", "Min (s):", "Med (s):", "Max (s):","Loss:";
+ foreach my $ref (@{$data->{data}}) {
+ my $time = $$ref{"time"};
+ unless ($time) {
+ # Lost group
+ print "# lost group\n";
+ next;
+ }
+ my $min_owd = $$ref{"min_owd"};
+ my $med_owd = $$ref{"med_owd"};
+ my $max_owd = $$ref{"max_owd"};
+ my $loss = $$ref{"lost_packets"};
+ printf "%-13i %-12f %-12f %-12f %-5i %s\n",
+ $time, $min_owd, $med_owd, $max_owd, $loss,
+ DateTime->from_epoch(epoch => $time, time_zone => $timezone)
+ ->strftime("%d.%m.%Y %H:%M:%S");
+ }
+} elsif ($data->isa("Hades::Data::IPPM_Sanitized")) {
+ printf " %-11s %-20s %-20s Time:\n\n",
+ "Seqnr:", "Sent time:", "Receive time:";
+ foreach my $ref (@{$data->{data}}) {
+ unless (ref($ref) eq "HASH") {
+ if ($ref eq "lost_group") {
+ print "# lost group\n";
+ }
+ next;
+ }
+ print "Lost: $ref->{lost_packets} Dup: " .
+ (@{$ref->{duplicate_packets}} ?
+ join "/", map {$_->{seqnr}} @{$ref->{duplicate_packets}} : "none") .
+ " Reordered: " .
+ (@{$ref->{reordered_packets}} ?
+ join " ", @{$ref->{reordered_packets}} : "none") .
+ "\n";
+ my $senttime_sec_old = 0;
+ my $senttime_str;
+ for my $i (0 .. $data->{meta}->{groupsize}-1) {
+ my $seqnr = $ref->{seqnr}->[$i];
+ unless (defined($seqnr)) {
+ print "# lost packet\n";
+ next;
+ }
+ my $senttime = $ref->{senttime}->[$i];
+ my $recvtime = $ref->{recvtime}->[$i];
+ my $senttime_sec = sprintf("%.0f",$senttime);
+ if ($senttime_sec != $senttime_sec_old) {
+ # Do time consuming date conversion only if necessary
+ $senttime_str = DateTime->from_epoch(
+ epoch => $senttime_sec, time_zone => $timezone
+ )->strftime("%d.%m.%Y %H:%M:%S");
+ $senttime_sec_old = $senttime_sec;
+ }
+ printf " %-11i %-20s %-20s %s\n",
+ $seqnr, $senttime, $recvtime, $senttime_str;
+ }
+ }
+} elsif ($data->isa("Hades::Data::IPPM_Raw")) {
+ printf "%-13s %-21s %-22s Time:\n",
+ "Seqnr:", "Sent time sec.nsec:", "Receive time sec.nsec:";
+ my $senttime_sec_old = 0;
+ my $senttime_str;
+ foreach my $ref (@{$data->{data}}) {
+ unless (ref($ref) eq "HASH") {
+ if ($ref eq "eof") {
+ print "# end of file\n";
+ } elsif ($ref eq "receiver_restart") {
+ print "# receiver restart\n";
+ }
+ next;
+ }
+ my $seqnr = $ref->{seqnr};
+ my $senttime_sec = $ref->{senttime_sec};
+ my $senttime_nsec = $ref->{senttime_nsec};
+ my $recvtime_sec = $ref->{recvtime_sec};
+ my $recvtime_nsec = $ref->{recvtime_nsec};
+ if ($senttime_sec != $senttime_sec_old) {
+ # Do time consuming date conversion only if necessary
+ $senttime_str = DateTime->from_epoch(
+ epoch => $senttime_sec, time_zone => $timezone
+ )->strftime("%d.%m.%Y %H:%M:%S");
+ $senttime_sec_old = $senttime_sec;
+ }
+ #printf "%-13i %-21s %-21s $day.$month.$year $hour:$min:$sec\n",
+ # $seqnr, "$senttime_sec.$senttime_nsec",
"$recvtime_sec.$recvtime_nsec";
+ printf "%-13i %s.%09lu %s.%09lu %s\n",
+ $seqnr, $senttime_sec, $senttime_nsec, $recvtime_sec, $recvtime_nsec,
+ $senttime_str;
+ }
+} elsif ($data->isa("Hades::Data::Traceroute")) {
+ print "Traceroutes:\n";
+ for (my $i=0 ; $i <= $#{$data->{traceroutes}} ; $i++) {
+ print " $i:\n";
+ for (my $j=0 ; $j <= $#{$data->{traceroutes}->[$i]} ; $j++) {
+ unless ( defined($data->{traceroutes}->[$i]->[$j]) ) {
+ print " MISSING HOP ENTRY\n";
+ next;
+ }
+ print " $j " .
+ $data->{traceroutes}->[$i]->[$j]->{name} . " " .
+ $data->{traceroutes}->[$i]->[$j]->{ip} . "\n";
+ }
+ }
+ print "Timeline:\n";
+ #TODO time zones !!!
+ for (my $i=0 ; $i < $#{$data->{timeline}} ; $i++) {
+ # note:
+ # Last entry in @timeline only has the "end" date for previous interval
+ print " ";
+ print DateTime->from_epoch(
+ epoch => $data->{timeline}->[$i]->{time}, time_zone => $timezone
+ )->strftime("%d.%m.%Y %H:%M:%S");
+ print " ";
+ if (!defined $data->{timeline}->[$i]->{ref}) {
+ print "Unknown";
+ } elsif ($data->{timeline}->[$i]->{ref} < 0) {
+ if (defined $data->{timeline}->[$i]->{error}) {
+ print "ERROR: $data->{timeline}->[$i]->{error}";
+ } else {
+ print "UNKNOWN ERROR";
+ }
+ } else {
+ print "Traceroute " . $data->{timeline}->[$i]->{ref};
+ }
+ print "\n";
+ }
+ print " ";
+ print DateTime->from_epoch(
+ epoch => $data->{timeline}->[-1]->{time}, time_zone => $timezone
+ )->strftime("%d.%m.%Y %H:%M:%S");
+ print " End of data";
+} elsif ($data->isa("Hades::Data::BWCTL")) {
+ printf "%-14s %-14s %-14s Time:\n",
+ "Minimum:", "Maximum:", "Average:";
+ foreach my $ref (@{$data->{data}}) {
+ unless (ref($ref) eq "HASH") {
+ # No special entries known for this data type
+ next;
+ }
+ printf "%-14i %-14i %-14i %s\n",
+ $ref->{min}, $ref->{max}, $ref->{avg},
+ DateTime->from_epoch(epoch => $ref->{time}, time_zone => $timezone)
+ ->strftime("%d.%m.%Y %H:%M:%S");
+ }
+#} elsif ( $data->isa(...) ) ....
+} else {
+ die "Unkown data object type\n";
+}
+
+
+#
+# Display statistics data
+#TODO Bei nur einer Datei das Zeug aus der Info-Datei nehmen?!?!?!?
+#
+$data->calculate_statistics(); #TODO error handling.....
+print Dumper $data->{warnings}->get(); # TODO
+if ($statistics) {
+ print "\nStatistics:\n";
+ if ( $data->isa("Hades::Data::IPPM_Aggregated") ||
+ $data->isa("Hades::Data::IPPM_Sanitized") ) {
+ my %statistics = %{$data->{statistics}};
+ foreach my $key (sort {$a cmp $b} keys %statistics) {
+ my $value = $statistics{$key};
+ chomp $value if $value;
+ if ($key eq "reordered_packets") {
+ shift @{$value}; # First value is empty, should be ALL not reordered
+ if (@{$value}) {
+ $value = join " ", @{$value};
+ } else {
+ $value = "none";
+ }
+ }
+ printf " %-18s: %s\n", $key, defined($value) ? $value : "<UNDEF>";
+ }
+ } elsif ( $data->isa("Hades::Data::IPPM_Raw") ) {
+ print " No statistical data available for this type of data\n";
+ }
+ print "\n";
+}
+
+exit 0;
+
+
+__END__
+
+
+=head1 NAME
+
+B<hades-show-data.pl> - Find and display different types of data.
+
+=head1 SYNOPSIS
+
+B<hades-show-data.pl> S<[B<--help>]>
+
+B<hades-show-data.pl> S<[B<--config>=F<CONFIGFILE>]> S<[B<--[no]verbose>]>
+ S<[B<--from>=I<DATE>]> S<[B<--to>=I<DATE>]> S<[B<--timezone>=I<TZNAME>]>
+ S<[B<--sender>=I<INTERFACE>]> S<[B<--receiver>=I<INTERFACE>]>
+ S<[B<--type>=I<TYPE>]> S<[B<--mid>=I<MID>]>
+ S<[B<--filter>=I<NAME:VALUE:NAME:VALUE...>]>
+ S<[B<--[no]meta>]> S<[B<--[no]statistics>]>
+
+B<hades-show-data.pl> S<[B<--[no]meta>]> S<[B<--[no]statistics>]> F<DATAFILE>
+
+
+
+=head1 DESCRIPTION
+
+TODO
+
+The script has two basic I<Working Modes>:
+
+=over
+
+=item 1.
+
+By using the various command line parameters you can specify meta data
+about the data you are looking for. If this meta data fits exactly one
+measurement, you will get data from that measurement. If more than one
+measurement is found, you will get a list of available measurements to be
able
+to narrow your search further. If no corresponding meta data is found you
will
+also be informed about it.
+
+=item 2.
+
+If a data file is specified on the command line, the script tries to
+find out what sort of measurement data it holds and displays that data.
+Most of the command line options are, of course, ignored in this mode.
+
+=back
+
+Normally all data types that can be retrieved using this script can be
+divided into three different sorts:
+
+=over
+
+=item 1.
+
+The real B<data> that was collected during the execution of the measurement.
+
+=item 2.
+
+The B<meta data> that is normally represented by the parameters used for the
+measurement. It answers the important questions: Who, when and how was the
+measurement running.
+
+=item 3.
+
+The B<statistical data> that is derived from the normal B<data>, and normally
+only a simple aggregation on the data.
+
+=back
+
+Without special parameters the script will print all three sorts of data. You
+can modify this behaviour by using the parameters S<[B<--[no]meta>]> and
+S<[B<--[no]statistics>]>, see below.
+
+Note: It depends on the type of measurement, how time consuming retrieving
+B<data>, B<meta data> and/or B<statistical data> is. Some measurement types
+(like I<ippm_aggregated>) can give fast access to B<meta data>, whilst others
+(like I<ippm_raw>) internally force loading of all the data. Calculating the
+B<statistical data> is most likely the most time consuming step for all
types.
+
+
+
+=head1 OPTIONS
+
+Nearly all options have built-in defaults that can be overridden using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--from>=I<DATE>
+
+Set the start date for the data to be retrieved.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Please note that this format allows to explicitly specify the time zone.
+
+Default: today 00:00:00 (aka. beginning of today)
+
+
+=item B<--to>=I<DATE>
+
+Set the end date for the data to be retrieved.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Please note that this format allows to explicitly specify the time zone.
+
+Default: now
+
+
+=item B<--timezone>=I<TZNAME>
+
+The I<TZNAME> given is a "link" name in the Olson database. The time zone it
+represents is used for displaying date and time. It is also used as default
+time zone for the above parameters B<--from> and B<--to>.
+
+Default: local time zone
+
+
+=item B<--sender>=I<INTERFACE>
+
+Set sender interface.
+
+Default: none
+
+
+=item B<--receiver>=I<INTERFACE>
+
+Set receiver interface.
+
+Default: none
+
+
+=item B<--type>=I<TYPE>
+
+Set measurement type.
+
+Possible values: S<I<ippm_aggregated>> S<I<ippm_sanitized>> S<I<ippm_raw>>
+S<I<traceroute>> S<I<traceroute_raw>> ...
+
+Special values: S<I<ippm_aggregated_from_raw>>
+
+Default: none
+
+
+=item B<--mid>=I<MID>
+
+Set MID (Measurement ID). Together with sender, receiver and type, a unique
+identifier of a measurement.
+
+Default: none
+
+
+=item B<--filter>=I<NAME:VALUE:NAME:VALUE...>
+
+Filter using meta data. Normally meta data is organized in key-value pairs.
+This parameter allows you to set the desired keys and values.
+
+Default: none
+
+
+=item B<--[no]meta>
+
+Using S<B<--nometa>> suppresses the printing of the meta data. When using
+S<B<--meta>> only the meta data will be printed.
+
+
+=item B<--[no]statistics>
+
+Using S<B<--nostatistics>> suppresses printing of statistical data.
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+List all available measurements for today:
+
+ hades-show-data.pl
+
+All ippm_aggregated measurements from Erlangen_Uni to Leipzig_Uni between
+09:30 and 09:40 on 18.1.2006:
+
+ hades-show-data.pl \
+ --from="2006.01.18 09:30:00" --to="2006.01.18 09:40:00" \
+ --type=ippm_aggregated --sender=Erlangen_Uni --receiver=Leipzig_Uni
+
+Display data from a data file:
+
+ hades-show-data.pl Erlangen_Uni.Leipzig_Uni.0.info.dat
+
+Search using meta data filter:
+
+ hades-show-data.pl --type=ippm_aggregated --filter=packetsize:1472
Property changes on: trunk/build/HADES/bin/hades-show-data.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-show-extremes.pl
===================================================================
--- trunk/build/HADES/bin/hades-show-extremes.pl
(rev 0)
+++ trunk/build/HADES/bin/hades-show-extremes.pl 2011-03-18 13:05:36
UTC (rev 692)
@@ -0,0 +1,293 @@
+#!/usr/bin/perl
+
+#TODO
+# - error handling
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Pod::Usage;
+use DateTime;
+use DateTime::Format::HTTP;
+use POSIX;
+
+use Hades;
+use Hades::Data::Finder;
+
+create_config(
+ "from=s" => undef,
+ "to=s" => undef,
+ "timezone=s" => undef,
+ "sender=s" => undef,
+ "receiver=s" => undef,
+ "mid=i" => undef,
+ "filter=s" => undef,
+) or pod2usage(2);
+
+my $verbose = $config{verbose};
+my $debug = $config{debug};
+my $datadir = $config{datadir};
+my $wwwdir = $config{wwwdir};
+
+my $timezone = $config{timezone} ?
+ DateTime::TimeZone->new( name => $config{timezone} )
+ : DateTime::TimeZone->new( name => "local" );
+my $from = $config{from} ?
+ DateTime::Format::HTTP->parse_datetime($config{from},$timezone)
+ : DateTime->now(time_zone => $timezone)->truncate(to => 'day');
+my $to = $config{to} ?
+ DateTime::Format::HTTP->parse_datetime($config{to},$timezone)
+ : DateTime->now(time_zone => $timezone);
+my $sender = $config{sender};
+my $receiver = $config{receiver};
+my $mid = $config{mid};
+
+my (@data,$warnings);
+my $finder = Hades::Data::Finder->new();
+if ($#ARGV >= 0) {
+ while (my $file = shift) {
+ my $data = $finder->from_file($file)
+ or die "Error reading file:\n" . Dumper $finder->{warnings}->get(); #
TODO
+ $data->isa("Hades::Data::IPPM_Aggregated")
+ or die "Not an aggregated IPPM data file ($file)\n";
+ push @data, $data;
+ }
+} elsif ($#ARGV < 0) {
+ $finder->set_time($from,$to) or die "Invalid date format\n";
+ $finder->set_route($sender,$receiver); # set_route understands "undef"
+ $finder->set_type("ippm_aggregated");
+ $finder->set_mid($mid); # ignores "undef"
+ $finder->set_filter( { split /:/,$config{filter} } ) # filter with meta
data
+ if $config{filter};
+ @data = $finder->find;
+ unless (@data) {
+ print "No data found!\n";
+ exit 1;
+ }
+ #TODO Hades::Warnings ..... die join("\n", @$warnings)."\n" if @$warnings;
+}
+
+#
+# Display data
+#
+foreach my $data (@data) {
+ $data->extract_data(); #TODO error handling.....
+ print Dumper $data->{warnings}->get(); # TODO
+ unless (ref($data->{data}) eq "ARRAY" && @{$data->{data}}) {
+ #TODO
+ next;
+ }
+ $data->calculate_statistics(); #TODO error handling.....
+ my $lossrate_percent = ($data->{statistics}->{lost_packets} /
+ $data->{statistics}->{total_packets}) * 100;
+ my %extremes = ();
+ my $first_run = 1;
+ my @med_owd = ();
+ my @med_ipdv = ();
+ foreach my $ref (@{$data->{data}}) {
+ unless (exists $ref->{time} && $ref->{time}) {
+ # Lost group
+ next;
+ }
+ push @med_owd, $ref->{med_owd};
+ push @med_ipdv, $ref->{med_ipdv};
+ if ($first_run) {
+ $extremes{min_owd} = $ref->{min_owd};
+ $extremes{max_owd} = $ref->{max_owd};
+ $extremes{min_ipdv} = $ref->{min_ipdv};
+ $extremes{max_ipdv} = $ref->{max_ipdv};
+ $first_run = 0;
+ next;
+ }
+ if ($ref->{min_owd} < $extremes{min_owd}) {
+ $extremes{min_owd} = $ref->{min_owd};
+ }
+ if ($ref->{min_ipdv} < $extremes{min_ipdv}) {
+ $extremes{min_ipdv} = $ref->{min_ipdv};
+ }
+ if ($ref->{max_owd} > $extremes{max_owd}) {
+ $extremes{max_owd} = $ref->{max_owd};
+ }
+ if ($ref->{max_ipdv} > $extremes{max_ipdv}) {
+ $extremes{max_ipdv} = $ref->{max_ipdv};
+ }
+ }
+ @med_owd = sort {$a<=>$b} @med_owd;
+ @med_ipdv = sort {$a<=>$b} @med_ipdv;
+ $extremes{med_owd} = $med_owd[floor($#med_owd/2)];
+ $extremes{med_ipdv} = $med_ipdv[floor($#med_ipdv/2)];
+ #
+ # Zeile ausgeben
+ #
+ print $from->ymd("-")." ".$from->hms(":").",";
+ print "$data->{sender},$data->{receiver},$data->{mid}";
+ foreach my $value (qw(min_owd med_owd max_owd min_ipdv med_ipdv max_ipdv))
{
+ #print ",$extremes{$value}";
+ printf(",%1.10f", $extremes{$value});
+ }
+ print ",$data->{statistics}->{lost_packets}";
+ print ",$data->{statistics}->{total_packets}";
+ print ",$lossrate_percent";
+ print "\n";
+}
+
+
+exit 0;
+
+
+__END__
+
+
+=head1 NAME
+
+B<hades-show-extremes.pl> - Find anomalies in IPPM data.
+
+=head1 SYNOPSIS
+
+B<hades-show-extremes.pl> S<[B<--help>]>
+
+B<hades-show-extremes.pl> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]>
+ S<[B<--from>=I<DATE>]> S<[B<--to>=I<DATE>]>
+ S<[B<--sender>=I<INTERFACE>]> S<[B<--receiver>=I<INTERFACE>]>
+ S<[B<--mid>=I<MID>]> S<[B<--filter>=I<NAME:VALUE:NAME:VALUE...>]>
+
+B<hades-show-extremes.pl> S<[B<--[no]verbose>]>
+ F<DATAFILE>...
+
+
+=head1 DESCRIPTION
+
+TODO
+
+CSV output:
+
+SENDER,RECEIVER,MID,MIN_OWD,MED_OWD,MAX_OWD,MIN_OWDV,MED_OWDV,MAX_OWDV,LOST_PACKETS,TOTAL_PACKETS,LOSS_RATE
+
+The script has two basic working modes:
+
+=over
+
+=item 1.
+
+By using various command line parameters you can specify meta data
+about the IPPM data you want to check for anomalies.
+
+=item 2.
+
+If one or more data files are specified on the command line, the script
tries
+to load the IPPM data to be checked from these files.
+
+=back
+
+
+
+=head1 OPTIONS
+
+Nearly all options have built-in defaults that can be overridden using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--from>=I<DATE>
+
+Set the start date for the data to be retrieved.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Please note that this format allows to explicitly specify the time zone.
+
+Default: today 00:00:00 (aka. beginning of today)
+
+
+=item B<--to>=I<DATE>
+
+Set the end date for the data to be retrieved.
+This parameter understands the date/time format recognized by
+L<DateTime::Format::HTTP>. See man page for further details.
+Please note that this format allows to explicitly specify the time zone.
+
+Default: now
+
+
+=item B<--timezone>=I<TZNAME>
+
+The I<TZNAME> given is a "link" name in the Olson database. The time zone it
+represents is used as default time zone for the above parameters
+B<--from> and B<--to>.
+
+Default: local time zone
+
+
+=item B<--sender>=I<INTERFACE>
+
+Set sender interface.
+
+Default: none
+
+
+=item B<--receiver>=I<INTERFACE>
+
+Set receiver interface.
+
+Default: none
+
+
+=item B<--mid>=I<MID>
+
+Set MID (Measurement ID). Together with sender and receiver, a unique
+identifier of an IPPM measurement.
+
+Default: none
+
+
+=item B<--filter>=I<NAME:VALUE:NAME:VALUE...>
+
+Filter using meta data. Normally meta data is organized in key-value pairs.
+This parameter allows you to set the desired keys and values.
+
+Default: none
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+Find all anomalies for today (using default configuration):
+
+ hades-show-extremes.pl
+
+Find anomalies in a data file:
+
+ hades-show-extremes.pl Erlangen_Uni.Leipzig_Uni.qos_ai.dat
+
Property changes on: trunk/build/HADES/bin/hades-show-extremes.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-show-hosts.pl
===================================================================
--- trunk/build/HADES/bin/hades-show-hosts.pl (rev
0)
+++ trunk/build/HADES/bin/hades-show-hosts.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,120 @@
+#!/usr/bin/perl
+
+# See embedded POD below for further information
+
+# TODO
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Pod::Usage;
+
+use Hades;
+
+
+create_config(
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $verbose = $config{verbose};
+my %hosts = %{$config{hosts}};
+
+my @warnings = ();
+
+if (@ARGV) {
+ foreach my $host (@ARGV) {
+ print_host($host, "NOT FOUND");
+ }
+} else {
+ foreach my $host (sort keys %hosts) {
+ print_host($host, "NOT IP");
+ }
+}
+
+
+sub print_host {
+ my ($host,$errmsg) = @_;
+ print "$host -> ";
+ if (defined $hosts{$host}->{ip}) {
+ print $hosts{$host}->{ip};
+ } else {
+ print $errmsg;
+ return;
+ }
+ print "\n";
+ foreach my $if (sort keys %{$hosts{$host}->{interfaces}}) {
+ print " $if -> " . $hosts{$host}->{interfaces}->{$if}->{ip} . "\n"
+ }
+ return 1;
+}
+
+
+__END__
+
+
+
+=head1 NAME
+
+B<hades-show-hosts.pl> - Show hosts from the configuration file
+
+=head1 SYNOPSIS
+
+B<hades-show-hosts.pl> S<[B<--help>]> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]> [HOSTNAME...]
+
+
+
+=head1 DESCRIPTION
+
+At the moment prints (primary) IP addresses of all hosts or host names
+from command line (S<[HOSTNAME...]>). Keep in mind: "host name" is the name
+of the host in the configuration file, not the DNS name!
+
+
+
+=head1 OPTIONS
+
+Nearly all options have a built in default, that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+E.g. the host name related to the SSH IP address determined through DNS.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+
Property changes on: trunk/build/HADES/bin/hades-show-hosts.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-ssh.pl
===================================================================
--- trunk/build/HADES/bin/hades-ssh.pl (rev 0)
+++ trunk/build/HADES/bin/hades-ssh.pl 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,186 @@
+#!/usr/bin/perl
+
+# See embedded POD below for further information
+
+# TODO
+# - It should be possible to use /.../xyz on the command line for setting
regexp
+# - SSH rework ...
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Socket;
+use Pod::Usage;
+
+use Hades;
+
+create_config(
+ 'all' => 0,
+ 'user|sshuser=s' => undef,
+ 'forwardport' => undef
+) or pod2usage(2);
+
+# Do not always use the config hash, instead set useful variables
+my $verbose = $config{verbose};
+my $forwardport = $config{forwardport};
+
+my %ssh_args = ();
+$ssh_args{user} = $config{user} if $config{user};
+
+my $sshcommand = "ssh ";
+$sshcommand = "$sshcommand -R 8000:127.0.0.1:80 " if $forwardport;
+
+if ( $#ARGV < 0 ) {
+ warn "Tell me which hosts to connect to\n\n";
+ pod2usage(2);
+}
+
+my %hosts = get_hosts;
+my %interfaces = get_interfaces;
+
+foreach my $arg (@ARGV) {
+ my %hosts_todo = ();
+ my $pat = qr/$arg/i;
+ foreach (sort keys %hosts) {
+ if (/$pat/) {
+ $hosts_todo{$_} = 1;
+ }
+ }
+ foreach (sort keys %interfaces) {
+ if (/$pat/) {
+ $hosts_todo{if2host($_)} = 1;
+ }
+ }
+ die "No host found matching $arg\n" unless %hosts_todo;
+ foreach my $host (sort keys %hosts_todo) {
+ my $ip = ssh_ip($hosts{$host});
+ die "Cannot determine ssh ip address for $host\n" unless defined $ip;
+ my $name = name($ip) || "NO DNS";
+ if ($config{all}) {
+ print "$host - $name - $ip\n";
+ } else {
+ print "$host - $name - $ip y/n? ";
+ my $input = <STDIN>;
+ next if $input =~ m/n/i;
+ }
+ system "$sshcommand " . ($ssh_args{user} ? "-l $ssh_args{user}" : "") .
+ " $ip";
+ print "\n";
+ }
+}
+
+
+
+__END__
+
+
+
+=head1 NAME
+
+B<hades-ssh.pl> - TODO
+
+=head1 SYNOPSIS
+
+B<hades-ssh.pl> S<[B<--help>]> S<[B<--config>=F<CONFIGFILE>]>
+ S<[B<--[no]verbose>]> S<[B<--[no]debug>]>
+ S<[B<--sshuser>=I<USER>]> S<[B<--[no]interactive>]>
+ S<[B<--forwardport>]>
+ S<[B<--all>]> COMMAND
+
+
+
+=head1 DESCRIPTION
+
+TODO
+
+
+
+=head1 OPTIONS
+
+Nearly all options have a built in default, that can be overwritten using
+command line arguments or variables in the configuration file. Arguments
+have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Use F<CONFIGFILE> as configuration file.
+
+
+=item B<--[no]verbose>
+
+Print additional information.
+
+Configuration file: C<$verbose>
+
+Default: disabled
+
+
+=item B<--[no]debug>|B<-d>
+
+Print additional information.
+
+Configuration file: C<$debug>
+
+Default: disabled
+
+
+=item B<--sshuser>=I<USER>
+
+Use I<USER> as user for ssh.
+
+Configuration file: ssh_args
+
+Default: current user
+
+
+=item B<--[no]sshinteractive>
+
+Set SSH mode to interactive (e.g. password prompt!).
+See also L<Net::SSH::Perl>.
+
+Configuration file: none
+
+Default: enabled
+
+
+=item B<--all>
+
+Connect to all matching hosts without asking.
+
+Configuration file: none
+
+Default: false
+
+
+=item TODO
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+> data2www.pl --date="23.4.2003"
+
+> data2www.pl --yesterday
+
+> data2www.pl --today
+
+
Property changes on: trunk/build/HADES/bin/hades-ssh.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hades-traceroute.pl
===================================================================
--- trunk/build/HADES/bin/hades-traceroute.pl (rev
0)
+++ trunk/build/HADES/bin/hades-traceroute.pl 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,1250 @@
+#!/usr/bin/perl
+
+# See embedded POD below for further information
+
+# TODO
+# - Detect and remove orphaned PID file! Or should it be done in init
scripts?
+# - Use IPC::run instead of backticks?
+# - Rework the handling of SIGTERM, SIGUSR1, SIGUSR2 to provide different
ways
+# of shutting down like in hades-analyzed.pl or oppd.pl ?
+# By using IPC::Run it would be possible to control the running traceroute
+# more efficiently, e.g. killing it with kill_kill.
+# - Implement more than one traceroute data file per route and add missing
+# parameters:
+# - packetlen
+# Is the packet size exactly the given value?
+# - -F Set the "Don't Fragment" bit.
+# Support in Hades?
+# - -t tos For IPv4, set the Type of Service (TOS) and Precedence value.
+# For IPv6, set the Traffic Control value.
+# Are the values from the Hades configuration useful the way they are???
+# - `$tracert_cmd` should (somehow) be killed, when we receive a signal!
+# Perhaps use IPC::Run?
+# - The following is bad because of three reasons:
+# warning: Corrupted data file:
/data//2008/09/11/Madrid_RedIris.London_GEANT.tracert.dat
+# notice: Skipping /etc/hades/geant/S.Madrid_RedIris.London_GEANT.0.cfg
+# - Why is the file corrupted (there is only the header)?
+# - Perhaps we should "cleanup" corrupted files??
+# - Why is there a // ?
+# Is it also happening on other machines? Especially 193.2.63.2
+# - Normally a YAML file should end with "...\n". Perhaps we should always
+# add this line and remove it before writing new data? Use another IO
+# method? See also auto_terminate of IO::YAML
+# - What to do about umask and also file owner of created files?
+# - Make $tracert, $tracertopts, $interval, $overwrite adjustable for every
+# measurement by adding them to configuration files. Not trivial for
+# $interval.
+
+use strict;
+use warnings;
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+# Common modules for all Hades/oppd daemons:
+use locale;
+use POSIX qw(setsid setpgid :sys_wait_h);
+use Log::Dispatch;
+use Log::Dispatch::File;
+use Log::Dispatch::Syslog;
+use Log::Dispatch::Screen;
+use Getopt::Long 2.32 qw(:config auto_help auto_version bundling);
+use Pod::Usage;
+use Config::General;
+# DateTime not needed by now, but this would be necessary, because Hades.pm
is
+# NOT loaded.
+#use DateTime;
+#use DateTime::Locale;
+#BEGIN {
+# if (DateTime::Locale->load(setlocale(LC_TIME))) {
+# DateTime->DefaultLocale(setlocale(LC_TIME));
+# }
+#}
+
+# Modules for this daemon:
+use File::Path;
+use File::stat;
+use File::Spec;
+use IO::Socket;
+use IO::Interface qw(:flags);
+use IO::File;
+use YAML;
+
+
+#
+# Important variables that should be available and initialised before the
+# (possible) execution of the END block
+#
+my (
+ $proc_type, $pidfile_ok, $log, $log_prefix, $shutdown_gracefully,
+ $shutting_down
+);
+INIT {
+ $proc_type = "main"; # Some code is executed by all children that fork
and
+ # do not exec afterwards. So we have to know
+ # what to do exactly.
+ # See e.g. END block and signal handlers for possible
+ # values.
+ $pidfile_ok = 0; # Care about existing pidfile in END
+ $log = Log::Dispatch->new();
+ # We also need the Log::Dispatch object for option verification quite
early
+ $log_prefix = ""; # Prepended to log message if set. This is intended for
+ # child processes and should not be "misused"!
+ $shutdown_gracefully = 0; # END called without signal is like SIGTERM !!
+ #TODO Use another default?
+ $shutting_down = 0; # This is set directly after entering the END block.
+ # Can be used to determine whether the process is
+ # going down at the moment. Important e.g. in signal
+ # handlers!
+}
+
+
+#
+# Parse Configuration (commandline and file)
+#
+
+my (
+ $configfile, $noconfig,
+ $detach, $syslog, $logfile, $nologfile, $pidfile, $nopidfile,
+ $loglevel, $verbose, $syslog_host, $syslog_ident, $syslog_facility,
+ $tracert, $tracertopts, $configdir, $interval, $overwrite,
+);
+GetOptions(
+ "config=s" => \$configfile,
+ "noconfig" => \$noconfig,
+ "detach|D!" => \$detach,
+ "logfile:s" => \$logfile,
+ "nologfile" => \$nologfile,
+ "pidfile:s" => \$pidfile,
+ "nopidfile" => \$nopidfile,
+ "syslog!" => \$syslog,
+ "syslog-host=s" => \$syslog_host,
+ "syslog-ident=s" => \$syslog_ident,
+ "syslog-facility=s" => \$syslog_facility,
+ "loglevel=s" => \$loglevel,
+ "verbose|v" => \$verbose,
+ "tracert|t=s" => \$tracert,
+ "tracertopts=s" => \$tracertopts,
+ "configdir=s" => \$configdir,
+ "interval=i" => \$interval,
+ "overwrite!" => \$overwrite,
+) or pod2usage(2);
+
+# Determine and load config file
+my %Config = ();
+my $Config;
+if ($noconfig) {
+ $configfile = undef;
+} else {
+ $configfile ||= "/etc/hades/traceroute.conf";
+ $Config = Config::General->new(
+ -ConfigFile => $configfile,
+ -ApacheCompatible => 1,
+ -AutoTrue => 1, # Could bring in some trouble, but it is really nice
;)
+ -CComments => 0, # Parsing is obviously broken in 2.36!
+ # Comments are found everywhere...
+ );
+ %Config = $Config->getall;
+}
+
+
+#
+# Calculate options
+# First not "undef" value is used.
+# Order: command line, config file, default
+#
+$detach = get_opt($detach, $Config{detach}, 1);
+$nologfile = get_opt($nologfile, 0); # No nologfile entry in config file!
+if ($nologfile) {
+ $logfile = undef;
+} else {
+ $logfile = get_opt($logfile, $Config{logfile}, 0);
+ if (!$logfile && $logfile ne "") {
+ # logfile disabled
+ $logfile = undef;
+ } elsif ($logfile eq "1" || $logfile eq "") {
+ # logfile enabled in configuration file or via --logfile without value
+ $logfile = "/var/log/hades-traceroute.log";
+ }
+}
+$nopidfile = get_opt($nopidfile, 0); # No nopidfile entry in config file!
+if ($nopidfile) {
+ $pidfile = undef;
+} else {
+ $pidfile = get_opt($pidfile, $Config{pidfile}, 1);
+ if (!$pidfile && $pidfile ne "") {
+ # pidfile disabled
+ $pidfile = undef;
+ } elsif ($pidfile eq "1" || $pidfile eq "") {
+ # pidfile enabled in configuration file or via --pidfile without value
+ $pidfile = "/var/run/hades-traceroute.pid";
+ }
+}
+$syslog = get_opt($syslog, $Config{syslog}, 0);
+$syslog_host = get_opt($syslog_host, $Config{'syslog-host'}, "");
+$syslog_ident =
+ get_opt($syslog_ident, $Config{'syslog-ident'}, "hades-traceroute");
+$syslog_facility =
+ get_opt($syslog_facility, $Config{'syslog-facility'}, "daemon");
+$loglevel = get_opt($loglevel, $Config{loglevel}, "notice");
+$verbose = get_opt($verbose, 0); # No verbose entry in config file!
+if ($verbose) {
+ $loglevel = "info";
+} else {
+ pod2usage( { -message => "Invalid log level: $loglevel",
+ -exitval => 2 } ) unless $log->level_is_valid($loglevel);
+}
+$tracert = get_opt($tracert, $Config{tracert},
"/bin/traceroute");
+$tracertopts = get_opt($tracertopts, $Config{tracertopts}, undef);
+$configdir = get_opt($configdir, $Config{configdir}, undef);
+unless (defined $configdir) {
+ if (defined $configfile) {
+ my ($vol,$dir,undef) = File::Spec->splitpath($configfile);
+ $configdir = "$vol$dir" || ".";
+ } else {
+ pod2usage( { -message => "No directory for configuration files
specified",
+ -exitval => 2 } );
+ }
+}
+$interval = get_opt($interval, $Config{interval}, 180);
+$overwrite = get_opt($overwrite, $Config{overwrite}, 0);
+
+
+#
+# Start logging ($log already initialised above)
+#
+
+if (defined $logfile) {
+ eval {
+ use locale;
+ use POSIX qw(locale_h strftime);
+ # "use locale", POSIX::strftime and localtime faster than DateTime
+ # and this has quite an impact on debug loging. Detected via SmokePing:
+ # http://perfsonar.acad.bg/smokeping
+ #use DateTime;
+ #use DateTime::Locale;
+ #BEGIN {
+ # if (DateTime::Locale->load(setlocale(LC_TIME))) {
+ # DateTime->DefaultLocale(setlocale(LC_TIME));
+ # }
+ #}
+ $log->add(
+ Log::Dispatch::File->new(
+ name => 'file',
+ min_level => $loglevel,
+ filename => $logfile,
+ mode => 'append', close_after_write => 1, autoflush => 1,
+ callbacks => sub {
+ my
%p=@_;
+ $p{message} = "$log_prefix: $p{message}" if $log_prefix;
+ $p{message} = strftime("%FT%T",localtime) . " $p{level}
$p{message}";
+ #DateTime->now(time_zone=>"local")->strftime("%c") .
+ return "$p{message}\n";
+ },
+ )
+ );
+ };
+ die "Cannot write to log file '$logfile':
$@\n"
if $@;
+}
+if ($syslog) {
+ eval {
+ my $syslog_socket = 'unix';
+ if ($syslog_host) {
+ $Sys::Syslog::host = $syslog_host;
+ $syslog_socket = 'inet';
+ }
+ $log->add(
+ Log::Dispatch::Syslog->new(
+ name => 'syslog',
+ min_level => $loglevel,
+ ident => "$syslog_ident",
+ facility => "$syslog_facility",
+ socket => "$syslog_socket",
+ logopt => "ndelay",
+ callbacks => sub {
+ my
%p=@_;
+ $p{message} = "$log_prefix: $p{message}" if $log_prefix;
+ return "$p{message}\n";
+ },
+ #mode => 'append', close_after_write => 0, autoflush => 1,
+ )
+ );
+ };
+ die "Cannot write to syslog:
$@\n"
if $@;
+}
+unless ($detach) {
+ $log->add(
+ Log::Dispatch::Screen->new(
+ name => 'screen',
+ min_level => $loglevel,
+ stderr => 1,
+ callbacks => sub {
+ my
%p=@_;
+ $p{message} = "$log_prefix: $p{message}" if $log_prefix;
+ $p{message} = "$p{level}: $p{message}";
+ return "$p{message}\n";
+ },
+ )
+ );
+}
+
+# More flexible die:
+# Put error into Log and afterwards die with same message.
+# Also handy, because in the following code $@ is undef in die call:
+# $log->error($@); die $@;
+$SIG{__DIE__} = sub {
+ die @_ if $^S; # Ignore dies from evals
+ my $logmsg = join " - ", @_;
+ chomp $logmsg; # No new line for Log::Dispatch !
+ # We should only be called with initialised $log, but we can be a bit
+ # more friendly by only using it if it was initialised:
+ $log->error($logmsg) if defined $log &&
UNIVERSAL::isa($log,'Log::Dispatch');
+ die @_;
+};
+
+# More flexible warn:
+# Put error into Log and afterwards warn with same message.
+$SIG{__WARN__} = sub {
+ my $logmsg = join " - ", @_;
+ chomp $logmsg; # No new line for Log::Dispatch !
+ # We should only be called with initialised $log, but we can be a bit
+ # more friendly by only using it if it was initialised:
+ $log->warning($logmsg)
+ if defined $log && UNIVERSAL::isa($log,'Log::Dispatch');
+ warn @_;
+};
+
+
+#
+# Daemonize
+#
+
+if ($detach) {
+ # Fork once, and let the parent exit.
+ my $pid = fork;
+ if ($pid) { $proc_type = "dummy"; exit; }
+ defined($pid) or die "Could not fork: $!\n";
+
+ # Dissociate from the controlling terminal that started us and stop being
+ # part of whatever process group we had been a member of.
+ setsid() or die "Cannot start a new session: $!\n";
+
+ # In Proc::Daemon there is a second fork executed with the following
comment:
+ # "Forks another child process and exits first child. This prevents the
+ # potential of acquiring a controlling terminal."
+ # This is nowhere else mentioned! Neither in Perl nor standard UNIX
+ # documentation.
+ # IMPORTANT: If you put a second fork here, the process group is most
likely
+ # not correct for sending signals e.g. in the END block!
+
+ # chdir and set umask
+ chdir '/' or die "Cannot chdir to '/': $!\n";
+ #umask 0;
+
+ setup_pidfile() if defined $pidfile;
+ # Do it before closing file handles! We need the error messages!
+
+ # Close default file handles
+ close STDIN or die "Could not close STDIN: $!\n";
+ close STDOUT or die "Could not close STDOUT: $!\n";
+ close STDERR or die "Could not close STDERR: $!\n";
+ # Reopen stderr, stdout, stdin to /dev/null
+ open(STDIN, "</dev/null");
+ open(STDOUT, ">/dev/null");
+ open(STDERR, ">/dev/null");
+} else {
+ setpgid(0,0) or die "Cannot set process group id: $!\n";
+ setup_pidfile() if defined $pidfile;
+}
+
+#
+# Signal handlers
+#
+
+# die on typical signals
+my $time_to_die = 0;
+my $time_to_reset = 0;
+my %cfgs = (); # configuration hashes
+$SIG{INT} = $SIG{TERM} = $SIG{USR1} = $SIG{USR2} = sub {
+ $log->notice("Caught SIG$_[0] - initiating shutdown");
+ $time_to_die = 1;
+ #$shutdown_gracefully not used! Our shutdown is more or less always
graceful!
+};
+$SIG{HUP} = sub {
+ $log->notice("SIGHUP caught - reloading");
+ # Force reload of all configuration files:
+ foreach my $cfg (keys %cfgs) {
+ $cfgs{$cfg}->{mtime} = -1;
+ }
+ $time_to_reset = 1;
+};
+$SIG{PIPE} = 'IGNORE';
+$SIG{TSTP} = $SIG{TTOU} = $SIG{TTIN} = 'IGNORE'; # ignore tty signals
+$SIG{CHLD} = \&REAPER; # Care about child processes. See below.
+
+#
+# Inform that everything looks good
+#
+
+$log->notice("hades-traceroute started");
+$log->info("PID $$ written to $pidfile") if defined $pidfile;
+
+
+#
+# Loop doing the work. Only exiting on signals
+#
+
+my $start_time = 0;
+
+MAIN:
+until ($time_to_die) {
+ if ($start_time) { # 0 for first time / reset
+ # Take run time into account for sleep time
+ my $sleep_time = $interval - (time - $start_time);
+ if ($sleep_time > 0) {
+ # We only need to sleep, if run time wasn't bigger than $interval
+ $log->info("Sleeping $sleep_time seconds");
+ sleep $sleep_time;
+ }
+ }
+ $start_time = time;
+
+ last MAIN if $time_to_die; # Safe place to leave
+
+ my $glob = "$configdir/*/S.*.cfg";
+ $log->debug("Glob used for finding configuration files: $glob");
+ my @cfgfiles = sort glob $glob;
+ $log->debug(
+ "Configuration files found via glob: " .
+ (@cfgfiles ? join ", ", @cfgfiles : "No files found")
+ );
+
+ my $found = 0;
+ foreach my $cfgfile (@cfgfiles) {
+ my $epoch = time;
+ my $fdate = get_fdate($epoch);
+ last MAIN if $time_to_die; # Safe place to leave
+ next unless $cfgfile =~
+ m#/([^/]+)/S\.([\w\d_-]+)\.([\w\d_-]+)\.([\w\d]+)\.cfg$#; # all cfg
files
+ my ($domain, $sender, $receiver, $fid) = ($1,$2,$3,$4);
+ $log->debug(
+ "Configuration file identified: " .
+ "domain=$domain, sender=$sender, receiver=$receiver, fid=$fid"
+ );
+ next unless $fid eq "0"; # TODO Implement more than one traceroute data
+ # file per route and add missing parameters
+ my $route = "$domain.$sender.$receiver";
+ $log->info("Checking configuration for $route");
+ my $cfghash;
+ if (exists $cfgs{$route}) {
+ # Route already known -> reuse cfghash
+ $cfghash = $cfgs{$route};
+ # Perhaps we need to reread the configuration file:
+ unless (my $st = stat($cfgfile)) {
+ $log->warning("Could not stat $cfgfile: $!");
+ $log->notice("Not able to check or reread $cfgfile");
+ } else {
+ if ($st->mtime > $cfghash->{mtime}) {
+ $log->info("Configuration might have changed");
+ $log->notice("Rereading $cfgfile");
+ #TODO Not switching to a new data file (if necessary) introduces
same
+ # problems as with IPPM. To be fixed in conjunction with HADES
+ # rework...
+ $cfghash = $cfgs{$route} = { %$cfghash, %{init_cfgfile($cfgfile)}
};
+ } else {
+ # Nothing to do. Use info instead of notice!
+ $log->info("Configuration has not changed");
+ }
+ }
+ } else {
+ # We don't need to inform via $log->notice, because this is done in
+ # called subroutines.
+ $cfghash = init_cfgfile($cfgfile);
+ if ($cfghash && set_fdate($cfghash,$fdate) && load_outfile($cfghash)) {
+ $cfgs{$route} = $cfghash;
+ } else {
+ $log->notice("Skipping $cfgfile");
+ next;
+ }
+ }
+ $found++;
+ unless ( $cfghash->{log_header} &&
+ ($fdate eq $cfghash->{log_header}->{date}) ) {
+ # if a new day or no header (first run)
+ $cfghash->{log_header} = {
+ date => $fdate,
+ source => $cfghash->{srcname},
+ dest => $cfghash->{destname}
+ };
+ $cfghash->{log_data} = [];
+ $cfghash->{last_data} = [];
+ set_fdate($cfghash, $fdate);
+ mkDir($cfghash->{outdir});
+ write_header($cfghash);
+ }
+
+ my $srcif = get_interface($cfghash->{srcip});
+ unless (defined $srcif) {
+ my $error = ["ERROR",
+ "Could not find interface with IP $cfghash->{srcip}"];
+ $log->warning($error->[1]);
+ add_entry($cfghash,[$epoch,$error]);
+ $cfghash->{last_data} = $error;
+ next;
+ }
+
+ my $tracert_cmd = $cfghash->{tracert} . " ";
+ if ($cfghash->{mode} eq "traceroute") {
+ $tracert_cmd .=
+ (defined $tracertopts ? $tracertopts : "-m 20 -N 1 -q 3 -w 1") .
+ # "friendly" default values
+ " -i ";
+ } else { # traceproto
+ $tracert_cmd .=
+ (defined $tracertopts ? $tracertopts : "-op -a0") .
+ " -F ";
+ }
+ $tracert_cmd .= "$srcif $cfghash->{destip}";
+ $log->info("Executing: $tracert_cmd");
+ my @tracert = `$tracert_cmd`;
+ last MAIN if $time_to_die; # Safe place to leave
+ # `$tracert_cmd` can take long and therefore be interrupted
+ # -> check here if $time_to_die
+
+ unless (@tracert) {
+ my $error = ["ERROR","No output from traceroute"];
+ $log->warning($error->[1]);
+ add_entry($cfghash,[$epoch,$error]);
+ $cfghash->{last_data} = $error;
+ next;
+ }
+
+ my @output = ();
+ if ($cfghash->{mode} eq "traceroute") {
+ foreach (@tracert) {
+ my @line = split;
+ next unless $line[0] =~ /^\d+$/;
+ my $ttl = shift @line;
+ @line = grep(!/^\*$/,@line);
+ my ($name,$ip) = ("UNKNOWN","UNKNOWN");
+ # $#line < 0 => Only "*"
+ if ($#line >= 0) {
+ # DNS name and ip in brackets
+ if (defined $line[1] && $line[1] =~ /^\(([^\)]+)\)/) {
+ $ip = $1;
+ $name = $line[0] unless $line[0] eq $ip; # No DNS
+ } elsif (defined $line[0] && $line[0] =~ /^\(([^\)]+)\)/) {
+ $ip = $1;
+ } else {
+ $name = "ERROR";
+ $ip = "$line[0] " . defined($line[1]) ? $line[1] : "UNDEF";
+ }
+ }
+ # TODO do a lot of parsing magic here to analyse errors/warnings from
+ # traceroute ...
+ push @output, "$ttl:$name:$ip";
+ }
+ } else {
+ foreach (@tracert) {
+ chomp;
+ # regexp from Hopwatcher 0.9.2 (see traceproto website)
+ next unless (m/^\d+\s((\d{1,3}\.){3}\d{1,3})\s[A-Z]+\s[\d.]+$/);
+ my ($ttl, $ip, $type, $time) = split / /, $_, 4;
+ my $ipn = inet_aton($ip);
+ my $name = gethostbyaddr($ipn, AF_INET);
+ push @output, "$ttl:$name:$ip";
+ }
+ }
+ unless (@output) {
+ # No valid lines could be extracted. This really happens in rare cases!
+ #TODO Debug what is really going on here...
+ my $error = ["ERROR","No valid output from traceroute"];
+ $log->warning($error->[1]);
+ add_entry($cfghash,[$epoch,$error]);
+ $cfghash->{last_data} = $error;
+ next;
+ }
+ unless ( equal_arrays($cfghash->{last_data},
\@output))
{
+ add_entry($cfghash,[$epoch,
\@output]);
+ $cfghash->{last_data} =
\@output;
+ $log->info("New entry written");
+ } else {
+ my $st = stat($cfghash->{outfile});
+ unless ($st = stat($cfghash->{outfile})) {
+ $log->warning("Could not stat $cfghash->{outfile}: $!");
+ $log->info("Not writing data");
+ } elsif ( $st->mtime < (time - 30*60) ) {
+ # "MARK"
+ add_entry($cfghash,[$epoch, ["MARK"] ]);
+ $log->info("MARK written");
+ } else {
+ $log->info("No change - Nothing written");
+ }
+ }
+ if ($time_to_reset ) {
+ # immediately start over
+ $time_to_reset = 0;
+ $start_time = 0; # Don't sleep!
+ last;
+ }
+ last MAIN if $time_to_die; # Safe place to leave
+ }
+ $log->warning("No configuration files found") unless $found;
+}
+
+
+exit 0;
+
+
+
+### END OF MAIN ###
+
+
+
+# Returns the first found parameter with a "defined" value
+sub get_opt {
+ foreach (@_) {
+ return $_ if defined;
+ }
+ return undef;
+}
+
+
+END {
+ # END could be executed without most of the initialisation from above
already
+ # done!
+ # At least the following variables should be already available via the INIT
+ # block (other should be considered to be possibly undef or empty):
+ # $proc_type, $pidfile_ok, $log, $log_prefix, $shutdown_gracefully,
+ # $shutting_down
+ # Keep this also in mind for subs called in the code below!
+ $shutting_down = 1;
+ return if $proc_type eq "dummy"; # Do not execute anything below
+ my $exitcode = $?; # Save $?
+ $log->info("Starting shutdown sequence");
+ # Clean up PID file:
+ unlink $pidfile if $pidfile_ok && -e $pidfile;
+ $log->notice("Exiting");
+ $? = $exitcode; # Restore $?
+}
+
+#
+# setup pid file
+#
+sub setup_pidfile {
+ die("PID file ($pidfile) contains pid! Already running?\n")
+ if -e $pidfile && -s $pidfile;
+ open(PIDFILE, ">$pidfile")
+ or die("Could not write PID file ($pidfile): $!\n");
+ print PIDFILE "$$\n";
+ $pidfile_ok = 1;
+ close PIDFILE
+ or die("Could not write PID file ($pidfile): $!\n");
+}
+
+
+#
+# This is our SIGCHLD handler. Not really doing important things...
+#
+sub REAPER { # see also 'man perlipc'
+ local $!; # waitpid() and others may overwrite current error
+ while ((my $pid = waitpid(-1,WNOHANG)) > 0) {
+ my $reason = $? ? " with exit code $?" : "";
+ $log->debug("Child process $pid exited" . $reason);
+ }
+ $SIG{CHLD} = \&REAPER; # loathe sysV
+}
+
+
+sub add_entry {
+ my ($cfghash,$entry) = @_;
+ write_data($cfghash->{outfile}, $entry);
+ push @{$cfghash->{log_data}}, $entry;
+}
+
+
+sub init_cfgfile {
+ my $cfgfile = shift;
+
+ my %result = ();
+
+ $log->info("Reading $cfgfile");
+
+ # We need the mtime:
+ my $st;
+ unless ($st = stat($cfgfile)) {
+ $log->warning("Could not stat $cfgfile: $!");
+ return;
+ }
+ $result{mtime} = $st->mtime;
+
+ # Now open and read the file:
+ unless (open CFG, "$cfgfile") {
+ $log->warning("Could not open $cfgfile: $!");
+ return;
+ }
+ foreach (<CFG>) {
+ chomp;
+ if (m/^\s*(.+)\s*=\s*(.+)\s*$/) {
+ if ($1 eq "datadir") {
+ $result{datadir} = $2;
+ } elsif ($1 eq "senderip") {
+ $result{srcip} = $2;
+ } elsif ($1 eq "sendername") {
+ $result{srcname} = $2;
+ } elsif ($1 eq "receiverip") {
+ $result{destip} = $2;
+ } elsif ($1 eq "receivername") {
+ $result{destname} = $2;
+# } elsif ($1 eq "tracertprg") {
+# $result{tracert} = $2;
+# } elsif ($1 eq "tracertopts") {
+# $result{tracertopts = $2;
+# } elsif ($1 eq "tracertinterval") {
+# $result{interval} = $2; # TODO what to set how?
+# } elsif ($1 eq "pidpath") {
+# $result{pidpath} = $2;
+ }
+ }
+ }
+ close CFG;
+
+ unless (defined $result{datadir}) {
+ $log->warning("Entry \"datadir\" not found in $cfgfile"); return;
+ }
+# unless (defined $result{pidpath}) {
+# $log->warning("Entry \"pidpath\" not found in $cfgfile"); return;
+# }
+ unless (defined $result{srcip}) {
+ $log->warning("Entry \"senderip\" not found in $cfgfile"); return;
+ }
+ unless (defined $result{srcname}) {
+ $log->warning("Entry \"sendername\" not found in $cfgfile"); return;
+ }
+ unless (defined $result{destip}) {
+ $log->warning("Entry \"receiverip\" not found in $cfgfile"); return;
+ }
+ unless (defined $result{destname}) {
+ $log->warning("Entry \"receivername\" not found in $cfgfile"); return;
+ }
+
+ #TODO "switch" for turning traceroute on/off ???
+
+ if ($tracert) {
+ # First the command line
+ $result{mode} = ($tracert =~ /traceproto/) ? "traceproto" : "traceroute";
+ $result{tracert} = $tracert;
+ } elsif ($result{tracert}) {
+ $result{mode} = ($result{tracert} =~ /traceproto/) ?
+ "traceproto" : "traceroute";
+ } else {
+ $result{mode} = "traceroute";
+ $result{tracert} = "/bin/traceroute";
+ }
+
+ return \%result;
+}
+
+
+sub get_fdate {
+ my $time = defined $_[0] ? $_[0] : time;
+ my ($sec, $min, $hour, $day, $month, $year) = gmtime($time);
+ $year += 1900;
+ $month += 1;
+ return sprintf("%d/%02d/%02d", $year, $month, $day);
+}
+
+
+# Set fdate to cfg hash
+sub set_fdate {
+ my ($cfghash,$fdate) = @_;
+ #TODO catch errors if $cfghash or $fdate wrong?
+ $cfghash->{outdir} = $cfghash->{datadir} . "/" . $fdate;
+ $cfghash->{outfile} = $cfghash->{outdir} . "/" .
+ $cfghash->{srcname} . "." . $cfghash->{destname} . ".tracert.dat";
+ return 1;
+}
+
+
+sub load_outfile {
+ my ($cfghash) = @_;
+
+ unless (-f $cfghash->{outfile}) {
+ $log->info("No existing data file found");
+ $cfghash->{log_header} = undef;
+ # new output prepared in main loop, if $cfghash->{log_header} undef
+ return 1;
+ }
+
+ $log->info("Reading existing data file");
+ ($cfghash->{log_header},my @log_data) =
YAML::LoadFile($cfghash->{outfile});
+ unless ( defined($cfghash->{log_header}) &&
+ (ref($cfghash->{log_header}) eq "HASH") &&
+ defined($log_data[0]) ) {
+ # Something is obviously wrong with this datafile. Most likely its just
+ # empty ;-)
+ # TODO more tests?
+ $log->warning("Corrupted data file: $cfghash->{outfile}");
+ if ($overwrite || -z $cfghash->{outfile}) {
+ unlink $cfghash->{outfile}
+ or $log->warning("Could not delete :$!"), return;
+ } else {
+ return;
+ }
+ }
+ $cfghash->{log_data} =
\@log_data;
+ $log->info("Last data from " . $cfghash->{log_data}->[-1]->[0]);
+ # Determine _real_ last data:
+ $cfghash->{last_data} = [];
+ for ( my $i=$#{$cfghash->{log_data}} ; $i>=0 ; $i-- ) {
+ next if $cfghash->{log_data}->[$i]->[1]->[0] =~ /^MARK/;
+ $cfghash->{last_data} = $cfghash->{log_data}->[$i]->[1];
+ last;
+ }
+ return 1;
+}
+
+
+sub write_header {
+ my ($cfghash) = @_;
+ unless (YAML::DumpFile($cfghash->{outfile},$cfghash->{log_header})) {
+ $log->warning("Could not write header to data file " .
+ $cfghash->{outfile} . ": $!");
+ return;
+ }
+ return 1;
+}
+
+
+sub write_data {
+ my ($outfile,$entry) = @_;
+
+ unless (-w $outfile) {
+ # File has to exist, because at least the header should have been written
+ # before!
+ $log->warning("Could not write to data file $outfile");
+ return;
+ }
+ # IO::YAML is a nice module, that would help to make the following code
+ # "cleaner". The changes for the following code to switch to IO:YAML:
+ # my $io = IO::YAML->new();
+ # $io->print($entry);
+ # But IO::YAML is not available on most operating systems out of the box.
+ # Therefore we avoid it by doing the simple task on our own...
+ my $io = IO::File->new();
+ unless ($io->open($outfile, '>>')) {
+ $log->warning("Could not open data file $outfile for appending: $!");
+ return;
+ }
+ unless (print $io Dump($entry)) {
+ $log->warning("Could not write to data file $outfile: $!");
+ return;
+ }
+ unless ($io->close) {
+ $log->warning("Could not close data file $outfile: $!");
+ return;
+ }
+ return 1;
+}
+
+
+
+sub equal_arrays {
+ my ($first, $second) = @_;
+ no warnings;
+
+ return 0 unless @$first == @$second;
+ for (my $i = 0; $i < @$first; $i++) {
+ return 0 if $first->[$i] ne $second->[$i];
+ }
+ return 1;
+}
+
+
+# Copied from Hades.pm with a few changes (Log::Dispatch)
+sub mkDir {
+ my $dir = shift;
+ if ( -e $dir ) {
+ if ( ! -d $dir ) {
+ die("$dir exists and is not a directory!\n");
+ }
+ } else {
+ mkpath "$dir" or die("Cannot mkdir ${dir}: $!\n");
+ }
+}
+
+
+# get_interface($srcip)
+# Maps a local source IP address to the name of the network interface that
+# carries it. Addresses accepted by inet_aton() (IPv4) are matched against
+# the socket's interface list; anything else is treated as a possible IPv6
+# address and matched by parsing /sbin/ifconfig output (global-scope
+# addresses only). Returns the interface name, or undef if nothing matches.
+sub get_interface {
+ my $srcip = shift;
+
+ my $socket = IO::Socket::INET->new(Proto => "udp");
+ my @interfaces = $socket->if_list;
+
+ my $srcipn = inet_aton($srcip);
+
+ if ($srcipn) { # Correct IPv4 address
+ foreach my $if (@interfaces) {
+ my $addr = $socket->if_addr($if);
+ next unless $addr;
+ my $ifipn = inet_aton($addr);
+ return $if if defined($ifipn) && $ifipn eq $srcipn; # == better ???
+ }
+ } else { # Incorrect IPv4 address -> IPv6 address?
+ # IPv6 - Hack
+ open(IFCONFIG, "/sbin/ifconfig|")
+ or $log->warning("Can't execute ifconfig: $!"), return;
+ my $if;
+ while (<IFCONFIG>) {
+ if (/^(\S+)/) {
+ $if = $1;
+ } elsif (/^\s+inet6 addr:\s+([\da-fA-F:]+)\/(\d+).*Scope:Global/) {
+ my $ip = $1; # $mask = $2;
+ return $if if $ip eq $srcip; # Not really correct!!!
+ }
+ }
+ }
+
+ return undef;
+}
+
+
+
+__END__
+
+
+
+=head1 NAME
+
+hades-traceroute.pl - Daemon gathering Hades traceroute information
+
+=head1 SYNOPSIS
+
+B<hades-traceroute.pl> [OPTIONS]
+
+
+
+=head1 DESCRIPTION
+
+B<hades-traceroute.pl> is a daemon that is executing traceroute or similar
+tools (see below) in order to determine and store a hop list to another
+Hades measurement box. It uses the Hades configuration files for finding
these
+other boxes and for configuring the traceroute execution.
+
+Because B<hades-traceroute.pl> uses external traceroute utilities for doing
+the traceroute work, it is important to have the correct traceroute tool
+available. The only normal traceroute tool that can be used without problems
+at the moment is the traceroute by Dmitry Butskoy (see
+L<http://dmitry.butskoy.name/traceroute/>). It is a modern traceroute for
+Linux operating systems and included as default traceroute in at least some
+of the common distributions, e.g. Fedora and RedHat.
+
+Another powerful traceroute tool supported by B<hades-traceroute.pl> is
+I<TraceProto> (L<http://traceproto.sourceforge.net/index.php>).
+It is very flexible and has a machine readable output mode.
+But it's lacking IPv6 so far and development seems to have stopped in 2005.
+
+The traceroute measurements are done one by one in a great loop. After one of
+these "rounds" is finished the next round is started after a specific time
+that can be adjusted via the option B<interval>.
+
+
+
+=head1 OPTIONS
+
+This is a full list of available command line options. Please keep in mind
+that this script does NOT provide the normal Hades command line options
+or configuration file options!
+Some options might even look familiar, although they are used slightly
+different!
+
+Nearly all options have a built in default that can be overwritten using
+command line arguments or variables in the configuration file.
+Arguments have precedence over variables in the configuration file.
+
+
+=over
+
+
+=item B<--help>
+
+Prints a help message and exits.
+
+
+=item B<--config>=F<CONFIGFILE>
+
+Read configuration file F<CONFIGFILE> for options.
+
+Default: F</etc/hades/traceroute.conf>
+
+
+=item B<--noconfig>
+
+Do not read any configuration file. The parameter B<--config> is also
ignored!
+
+Default: off
+
+
+=item B<--[no]detach>
+
+Detach from terminal, aka run in background (instead of foreground).
+Log messages will not be sent to F<STDERR>.
+
+Default: on
+
+Configuration file: B<detach>
+
+
+=item B<--logfile>[=F<LOGFILE>]
+
+Append messages to file F<LOGFILE>.
+
+Just use B<--logfile> without the optional value to enable logging to default
+log file F</var/log/hades-traceroute.log>.
+
+You can use this option together with B<--syslog>.
+Messages will then be written to both, log file and system log.
+
+Default: off
+
+Configuration file: B<logfile>
+
+
+=item B<--nologfile>
+
+Do not write to any log file. The parameter B<--logfile> is also ignored!
+
+Default: off
+
+Configuration file: use B<logfile>
+
+
+=item B<--[no]syslog>
+
+Whether messages should be written to system log.
+
+You can use this option together with B<--logfile>.
+Messages will then be written to both, log file and system log.
+
+Default: off
+
+Configuration file: B<syslog>
+
+
+=item B<--syslog-host>=I<HOST>
+
+Use I<HOST> as host to which system log messages are forwarded.
+
+If this option is set to a dns name or ip address, all system log messages
+are forwarded to the specified remote host.
+If set to the empty string ("") logging is done locally.
+
+Default: log locally
+
+Configuration file: B<syslog-host>
+
+
+=item B<--syslog-ident>=I<IDENT>
+
+The string I<IDENT> will be prepended to all messages in the system log.
+
+Default: I<hades-traceroute>
+
+Configuration file: B<syslog-ident>
+
+
+=item B<--syslog-facility>=I<FACILITY>
+
+Use I<FACILITY> as type of program for system logging.
+
+This string will be used as the system log facility for messages sent to
+the system log.
+
+See your C<syslog(3)> documentation for the facilities available on your
+system.
+Typical facilities are I<auth>, I<authpriv>, I<cron>, I<daemon>, I<kern>,
+I<local0> through I<local7>, I<mail>, I<news>, I<syslog>, I<user>, I<uucp>.
+
+Default: I<daemon>
+
+Configuration file: B<syslog-facility>
+
+
+=item B<--loglevel>=I<LOGLEVEL>
+
+Use I<LOGLEVEL> as log level used for logging to syslog and to the log files.
+
+This option is used for setting the verbosity of the running daemon.
+The log levels available are the log levels defined by Log::Dispatch.
+
+This is a list of values that should be accepted:
+ 0 = debug
+ 1 = info
+ 2 = notice
+ 3 = warning
+ 4 = err = error
+ 5 = crit = critical
+ 6 = alert
+ 7 = emerg = emergency
+
+Default: I<notice>
+
+Configuration file: B<loglevel>
+
+
+=item B<--verbose>
+
+Just a handy abbreviation for B<--loglevel>=I<info>.
+
+Default: not set, see B<--loglevel>
+
+Configuration file: use B<loglevel>=I<info>
+
+
+=item B<--pidfile>[=F<PIDFILE>]
+
+Use PIDFILE as name of pid file.
+The pid file contains the Process ID of the running oppd service.
+
+Just use B<--pidfile> without the optional value to use the default pid file
+F</var/run/hades-traceroute.pid>.
+
+Default: F</var/run/hades-traceroute.pid>
+
+Configuration file: B<pidfile>
+
+
+=item B<--nopidfile>
+
+Do not use a pid file. The parameter B<--pidfile> is also ignored!
+
+Default: off
+
+Configuration file: use B<pidfile>
+
+
+=item B<--tracert|-t>=F<TRACERT>
+
+Use F<TRACERT> as traceroute/traceproto binary for tracing the route.
+Can be an absolute or relative path. For relative paths the PATH environment
+variable is searched. For security reasons you should normally avoid using relative
+paths.
+
+Default: F</bin/traceroute>
+
+Configuration file: B<tracert>
+
+
+=item B<--tracertopts>=I<TRACERTOPTS>
+
+This option is a string that is included in the command line used for
+executing the traceroute command. The interface parameter (-i or -F), the
+interface and the destination ip address are added automatically.
+
+Default: "-m 20 -N 1 -q 3 -w 1" for traceroute; "-op -a0" for traceproto
+
+Configuration file: B<tracertopts>
+
+
+=item B<--configdir>=I<CONFIGDIR>
+
+Use I<CONFIGDIR> for finding the configuration files for all routes.
+All files in all subdirectories of this directory starting with "S." and
+with the suffix ".0.cfg" will be used as configuration files.
+
+Default: The same directory the global configuration file was loaded from.
+
+Configuration file: B<>
+
+
+=item B<--interval>=I<SECONDS>
+
+Use I<SECONDS> number of seconds as minimum interval between two traceroute
+measurement rounds.
+
+hades-traceroute will try to start a round of all traceroute measurements
+every number of seconds that you specified using this option. It will wait
+till about the specified number of seconds have passed from the start of
+the previous round before starting the next one. If one round takes longer
+than the interval time, the next round is started immediately.
+
+Default: 180
+
+Configuration file: B<interval>
+
+
+=item B<--[no]overwrite>
+
+Enable this option, if you want hades-traceroute to delete corrupt data
+files. With this option disabled, hades-traceroute will leave corrupt (or
+wrong) data files untouched and NOT do any more traceroute measurements for
+the related configuration!
+
+Default: off
+
+Configuration file: B<overwrite>
+
+
+=back
+
+
+
+=head1 SIGNALS
+
+The hades-traceroute daemon can be controlled by using various signals.
+
+
+=over
+
+
+=item SIGHUP
+
+SIGHUP leads to rereading config files and start over loop of traceroute
+measurements. Daemon is NOT reconfigured (rereading of main configuration
+file).
+
+
+=item SIGINT, SIGTERM and SIGUSR1/2
+
+All four signals lead to a graceful shutdown, that is not interrupting a
+running traceroute command. Normally exiting should be fast enough and safe
+this way! It can take longer, if the traceroute command has problems. Then it's
+also not a problem to kill hades-traceroute.pl using SIGKILL.
+
+A more sophisticated handling might be implemented in the future.
+
+
+=back
+
+
+
+=head1 EXAMPLES
+
+Start with a different configuration file:
+
+ $ hades-traceroute.pl --config=/usr/local/etc/hades-traceroute.conf
+
+Debug the daemon:
+
+ $ hades-traceroute --nodetach \
+ --loglevel=debug --nologfile --nopidfile --nosyslog
+
+Use some other options instead of the ones from the configuration file:
+
+ $ hades-traceroute.pl --port=51234 --nologfile --pidfile=oppd.pid
+
+
+
+=head1 SEE ALSO
+
+hades-traceroute.conf
+=for comment TODO This one has to be written...
+
+
+
+=head1 AUTHORS
+
+DFN Labor Erlangen,
Property changes on: trunk/build/HADES/bin/hades-traceroute.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/bin/hadescfg.pl
===================================================================
--- trunk/build/HADES/bin/hadescfg.pl (rev 0)
+++ trunk/build/HADES/bin/hadescfg.pl 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,601 @@
+#!/usr/bin/perl
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+use Hades;
+use Hades::Config::FileSaver;
+use File::Copy;
+use Pod::Usage;
+
+my @ORIGARGS = @ARGV;
+
+my $config;
+my $saver;
+my $cfg;
+
+my @mainopts = ("datadir", "bindir", "wwwdir", "cfgsdir", "sleeptime",
"verbose", "debug",
+ "portbase", "portmax", "rsync_path", "rsync_rsh",
"htmldocumentroot",
+ "documentroot", "htmlplotcache", "plotcache", "htmlmapdir",
"mapdir",
+ "max_cache_size", "wui_tmpdir", "gnuplot");
+my @hostopts = ("log_path", "bin_path", "cfg_path", "pid_path", "dat_path",
"ip",
+ "name");
+my @ifopts = ("host", "shortname", "ip", "name"); ####### mapcoords
!!!!!!!!!!!!!
+my @sshopts = ("user", "protocol");
+my @routeopts = ("interval", "groupsize", "packetsize", "precedence",
"packetinterval",
+ "transmittime", "verbose", "map", "alert", "sender",
"receiver");
+
+#create_config(configfile => "hades-abc.conf", use_argv => 0) or die "ERROR:
Could not initialize $newdomain!\n";
+#my $config = $Hades::config;
+#print $$config{config}{domain} . "\n";
+#exit;
+
+if (create_config(
+ 'file=s' => undef,
+ 'add=s@' => \$add,
+ 'delete=s@' => \$delete,
+ 'option=s@' => \$option,
+ 'ssh_args=s@' => \$ssh_args,
+ 'identity_files=s@' => \$identity_files
+ )) {
+ $config = $Hades::config;
+ $saver = Hades::Config::FileSaver->new(config => $config);
+ $cfg = $config->{config};
+} else {
+ @ARGV = @ORIGARGS;
+
+ my $newdomain = (grep(/config/, @ORIGARGS))[0];
+ $newdomain =~ s/^--config=//;
+ my $newcfgfile = "$FindBin::RealBin/../etc/hades-${newdomain}.conf";
+ my $templatefile = "$FindBin::RealBin/../lib/Hades/Config/template.conf";
+
+# print "New domain: $newdomain\n";
+ if (-f $newcfgfile) {
+ die "ERROR: $newcfgfile exists!\n";
+ }
+ unless (-f $templatefile) {
+ die "ERROR: $templatefile missing!\n";
+ }
+ copy($templatefile, $newcfgfile);
+# print "newcfgfile: $newcfgfile\n";
+
+ $Hades::configfile = $newcfgfile;
+ create_config(
+ 'file=s' => undef,
+ 'add=s@' => \$add,
+ 'delete=s@' => \$delete,
+ 'option=s@' => \$option,
+ 'ssh_args=s@' => \$ssh_args,
+ 'identity_files=s@' => \$identity_files
+# ) or die "ERROR: Could not initialize $newdomain!\n";
+ ) or pod2usage(2);
+
+ $config = $Hades::config;
+ $saver = Hades::Config::FileSaver->new(config => $config, new => 1);
+ $cfg = $config->{config};
+
+ $$config{config}{domain} = $newdomain;
+}
+
+my $file = $config{file};
+my @add;
+if (ref($config{add}) eq "ARRAY") {
+ my $add = $config{add};
+ @add = @$add;
+}
+my @delete;
+if (ref($config{delete}) eq "ARRAY") {
+ my $delete = $config{delete};
+ @delete = @$delete;
+}
+my @option;
+if (ref($config{option}) eq "ARRAY") {
+ $option = $config{option};
+ @option = @$option;
+}
+my @ssh_args;
+if (ref($config{ssh_args}) eq "ARRAY") {
+ $ssh_args = $config{ssh_args};
+ @ssh_args = @$ssh_args;
+}
+my @identity_files;
+if (ref($config{identity_files}) eq "ARRAY") {
+ $identity_files = $config{identity_files};
+ @identity_files = @$identity_files;
+}
+
+if ($file) {
+ print "file: $file\n";
+ open CFGFILE, $file;
+ while (<CFGFILE>) {
+ chomp;
+ next if m/^#/;
+ next if m/^$/;
+ my ($opt, $val) = split /\s+/, $_, 2;
+ if ($opt eq "host") {
+ addhost($val);
+ } elsif ($opt eq "interface") {
+ addinterface($val);
+ } elsif ($opt eq "route") {
+ addroute($val);
+ } elsif ($opt eq "delete") {
+ deleteentry($val);
+ } elsif ($opt eq "ssh_args") {
+ ssh_args($val);
+ } elsif ($opt eq "identity_files") {
+ identity_files($val);
+ } elsif (scalar grep(/$opt/,@mainopts)) {
+ $$config{config}{$opt} = $val;
+ } else {
+ print "WARNING: unknown command $_\n";
+ }
+ }
+ close CFGFILE;
+} else {
+ foreach (@add) {
+ chomp;
+ my ($opt, $val) = split /\s+/, $_, 2;
+ if ($opt eq "host") {
+ addhost($val);
+ } elsif ($opt eq "interface") {
+ addinterface($val);
+ } elsif ($opt eq "route") {
+ addroute($val);
+ } else {
+ print "WARNING: don't know how to add $val!\n";
+ }
+ }
+ foreach (@delete) {
+ chomp;
+ deleteentry($_);
+ }
+ foreach (@option) {
+ chomp;
+ my ($opt, $val) = split /\s+/;
+ unless (scalar grep(/$opt/,@mainopts)) {
+ print "WARNING: unknown option $opt\n";
+ next;
+ }
+ $$config{config}{$opt} = $val;
+ }
+ foreach (@ssh_args) {
+ chomp;
+ ssh_args($_);
+ }
+ foreach (@identity_files) {
+ chomp;
+ identity_files($_);
+ }
+}
+
+print "configfile: " . $Hades::configfile . "\n";
+$saver->save($Hades::configfile);
+
+# deleteentry("UNIT OPTIONS...")
+# Dispatches a delete command: the first word selects the unit type
+# (host, interface, or route) and the remainder is handed to the matching
+# del* routine. Unknown unit types only produce a warning.
+sub deleteentry {
+ my $opts = shift @_;
+ my ($opt, $val) = split /\s+/, $opts, 2;
+ if ($opt eq "host") {
+ delhost($val);
+ } elsif ($opt eq "interface") {
+ delinterface($val);
+ } elsif ($opt eq "route") {
+ delroute($val);
+ } else {
+ print "WARNING: unknown delete command $val\n";
+ }
+}
+
+# delhost($name)
+# Removes host $name from the configuration: first deletes each of its
+# interfaces via delinterface (which also removes the routes that use
+# them), then deletes the host entry itself.
+sub delhost {
+ my $name = shift @_;
+ my @ifs = host2if($name);
+ foreach my $if (@ifs) {
+ print "deleting $name $if\n";
+ delinterface($if); # this will delete the dependent routes as well
+ }
+ delete ${$cfg->{hosts}}{$name};
+}
+
+# delinterface($name)
+# Removes interface $name from its owning host and deletes every route
+# that uses it as sender or receiver.
+sub delinterface {
+ my $name = shift @_;
+ my $host = if2host($name);
+ foreach my $sender (keys %{$cfg->{routes}}) {
+ if ($sender eq $name) {
+ delete $$cfg{routes}{$sender};
+ next;
+ }
+ foreach my $receiver (keys %{$cfg->{routes}{$sender}}) {
+ if ($receiver eq $name) {
+ delete $$cfg{routes}{$sender}{$name};
+ }
+ }
+ # NOTE(review): "$cfg{routes}" below refers to the (separate) hash %cfg,
+ # not the hash ref $cfg, and $# is applied to a hash entry - this
+ # emptiness check looks broken; verify against $$cfg{routes}{$sender}.
+ if ($#{$cfg{routes}{$sender}} == -1) { # No route left with this sender
+ delete $$cfg{routes}{$sender}; # Delete the empty hash
+ }
+ }
+ delete ${$cfg->{hosts}}{$host}{interfaces}{$name};
+}
+
+# if2host($if)
+# Returns the name of the host that owns interface $if, or undef if no
+# configured host has an interface with that name.
+sub if2host {
+ my $if = shift @_;
+ foreach my $host (keys %{$cfg->{hosts}}) {
+ if (exists ${$cfg->{hosts}}{$host}{interfaces}{$if}) {
+ return $host;
+ }
+ }
+ return undef;
+}
+
+# host2if($host)
+# Returns the list of interface names configured for host $host.
+# NOTE(review): assigns the global output field separator $, without
+# 'local', so the change leaks into the rest of the program; verify that
+# this is intended (it appears to be leftover debugging support).
+sub host2if {
+ my $host = shift @_;
+ print "host: $host\n";
+ $, = "\n";
+# print keys %{$$cfg{hosts}{$host}{interfaces}};
+ return keys %{$$cfg{hosts}{$host}{interfaces}};
+# return keys %{${$cfg->{hosts}}{$host}{interfaces}};
+}
+
+# delroute("sender IF receiver IF [OPT VAL ...]")
+# Deletes the first route between the given sender and receiver whose
+# effective options (route options merged over routes_default) match every
+# OPT/VAL pair supplied. Only one matching route is removed.
+sub delroute {
+ my $opts = shift @_;
+ my @opts = split /\s+/, $opts;
+ my %routehash = %{$$cfg{routes_default}};
+ my $sender;
+ my $receiver;
+ do {
+ my $opt = shift @opts;
+ my $val = shift @opts;
+ if ($opt eq "sender") {
+ $sender = $val;
+ } elsif ($opt eq "receiver") {
+ $receiver = $val;
+ } else {
+ $routehash{$opt} = $val;
+ }
+ } while $#opts >= 1;
+ myroute:
+ for (my $i = 0; $i <= $#{$$cfg{routes}{$sender}{$receiver}}; $i++) {
+ my %route = (%{$$cfg{routes_default}},
%{${$$cfg{routes}{$sender}{$receiver}}[$i]});
+ foreach my $ropt (keys %routehash) {
+ if ($route{$ropt} ne $routehash{$ropt}) {
+ next myroute; # this is not the route we were looking for
+ }
+ }
+ splice @{$$cfg{routes}{$sender}{$receiver}}, $i, 1;
+ last;
+ }
+}
+
+# addhost("name NAME [OPT VAL ...]")
+# Adds or updates a host entry. Each option is validated against
+# @hostopts; when NAME is "default" the default host section is updated
+# instead of a concrete host.
+sub addhost {
+ my $opts = shift @_;
+ my @opts = split /\s+/, $opts;
+ my %hosthash;
+ do {
+ my $opt = shift @opts;
+ my $val = shift @opts;
+ if (scalar grep(/$opt/,@hostopts)) {
+ $hosthash{$opt} = $val;
+ } else {
+ print "WARNING: unknown host option : \"$opt $val\"\n";
+ }
+ } while $#opts >= 1;
+ foreach (keys %hosthash) {
+ next if $_ eq "name";
+ if ($hosthash{name} eq "default") {
+ ${$cfg->{hosts_default}}{$_} = $hosthash{$_};
+ } else {
+ ${$cfg->{hosts}}{$hosthash{name}}{$_} = $hosthash{$_};
+ }
+ }
+}
+
+# addinterface("host HOST name NAME [OPT VAL ...]")
+# Adds or updates interface NAME on an existing host HOST. Options are
+# validated against @ifopts; both "host" and "name" are mandatory, and a
+# warning is printed when HOST does not exist.
+sub addinterface {
+ my $opts = shift @_;
+ my @opts = split /\s+/, $opts;
+ my %ifhash;
+ do {
+ my $opt = shift @opts;
+ my $val = shift @opts;
+ if (scalar grep(/$opt/,@ifopts)) {
+ $ifhash{$opt} = $val;
+ } else {
+ print "WARNING: unknown interface option : \"$opt $val\"\n";
+ }
+ } while $#opts >= 1;
+ unless (exists $ifhash{host}) {
+ print "WARNING: host not given for addinterface $opts!\n";
+ return;
+ }
+ unless (exists $ifhash{name}) {
+ print "WARNING: name not given for addinterface $opts!\n";
+ return;
+ }
+ if (exists $$cfg{hosts}{$ifhash{host}}) {
+ foreach (keys %ifhash) {
+ next if $_ eq "host";
+ next if $_ eq "name";
+ ${$cfg->{hosts}}{$ifhash{host}}{interfaces}{$ifhash{name}}{$_} =
$ifhash{$_};
+ }
+ } else {
+ print "WARNING: No such host $ifhash{host}!\n";
+ }
+}
+
+# addroute("[default] sender IF receiver IF [OPT VAL ...]")
+# Adds a measurement route from the sender interface to the receiver
+# interface (both must already exist, checked via if2host), or - with the
+# leading "default" keyword - updates the default route options instead.
+# Options are validated against @routeopts; sender/receiver must differ
+# and are invalid for the default route.
+sub addroute {
+ my $opts = shift @_;
+ my @opts = split /\s+/, $opts;
+ my $default = 0;
+ my $sender;
+ my $receiver;
+ if ($opts[0] eq "default") {
+ shift @opts;
+ $default = 1;
+ }
+ my %routehash;
+ do {
+ my $opt = shift @opts;
+ my $val = shift @opts;
+ if (scalar grep(/$opt/,@routeopts)) {
+ if ($opt eq "sender") {
+ $sender = $val;
+ } elsif ($opt eq "receiver") {
+ $receiver = $val;
+ } else {
+ $routehash{$opt} = $val;
+ }
+ } else {
+ print "WARNING: unknown route option: \"$opt $val\"\n";
+ }
+ } while $#opts >= 1;
+ if ($default) {
+ if ($sender) {
+ print "WARNING: cannot set sender for default route!\n";
+ return;
+ }
+ if ($receiver) {
+ print "WARNING: cannot set receiver for default route!\n";
+ return;
+ }
+ foreach (keys %routehash) {
+ ${$cfg->{routes_default}}{$_} = $routehash{$_};
+ }
+ } else {
+ if ($sender eq $receiver) {
+ print "WARNING: sender and receiver are identical ($sender)!\n";
+ return;
+ }
+ unless ($sender and $receiver) {
+ print "WARNING: sender or receiver not given!\n";
+ }
+ if (if2host($sender) and if2host($receiver)) { # valid interfaces
+ push @{$cfg->{routes}{$sender}{$receiver}}, {%routehash};
+ }
+ }
+}
+
+# ssh_args("host HOST [OPT VAL ...]")
+# Sets ssh_args options (validated against @sshopts) for host HOST, or
+# for the default host section when HOST is "default". Warns when HOST
+# does not exist.
+sub ssh_args {
+ my $opts = shift @_;
+ my @opts = split /\s+/, $opts;
+ my %sshhash;
+ my $host;
+ do {
+ my $opt = shift @opts;
+ my $val = shift @opts;
+ if ($opt eq "host") {
+ $host = $val;
+ } elsif (scalar grep(/$opt/,@sshopts)) {
+ $sshhash{$opt} = $val;
+ } else {
+ print "WARNING: unknown ssh_args option: \"$opt $val\"\n";
+ }
+ } while $#opts >= 1;
+ if ($host eq "default") {
+ foreach (keys %sshhash) {
+ $$cfg{hosts_default}{ssh_args}{$_} = $sshhash{$_};
+ }
+ } else {
+ if (exists $$cfg{hosts}{$host}) {
+ foreach (keys %sshhash) {
+ $$cfg{hosts}{$host}{ssh_args}{$_} = $sshhash{$_};
+ }
+ } else {
+ print "WARNING: No such host $host!\n";
+ }
+ }
+}
+
+# identity_files("host HOST FILE [FILE ...]")
+# Replaces the list of ssh identity files for host HOST (or for the
+# default host section when HOST is "default") with the given files.
+# NOTE(review): the loop variable $i is not declared with 'my'; verify the
+# script is meant to run without 'use strict'.
+sub identity_files {
+ my $opts = shift @_;
+ my @opts = split /\s+/, $opts;
+ my @files;
+ my $host;
+ my $n = $#opts;
+ for ($i = 0; $i <= $n; $i++) {
+ my $opt = shift @opts;
+ if ($opt eq "host") {
+ $host = shift @opts;
+ $i++;
+ } else {
+ push @files, $opt;
+ }
+ }
+ if ($host eq "default") {
+ @{$$cfg{hosts_default}{ssh_args}{identity_files}} = @files;
+ } else {
+ if (exists $$cfg{hosts}{$host}) {
+ @{$$cfg{hosts}{$host}{ssh_args}{identity_files}} = @files;
+ } else {
+ print "WARNING: No such host $host!\n";
+ }
+ }
+}
+
+__END__
+
+=head1 NAME
+
+hadescfg.pl - Convert HADES config files into perl module style files
+
+=head1 SYNOPSIS
+
+hadescfg.pl --config=DOMAIN --file CONFIGFILE
+
+or
+
+hadescfg.pl --config=DOMAIN [--add "UNIT OPTION VALUE [OPTION2 VALUE2 ...]"]
+[--delete "UNIT OPTION VALUE [OPTION2 VALUE2 ..."]
+[--ssh_args "host HOSTNAME SSH_ARG VALUE [...]"]
+[--identity_files "host HOSTNAME FILE1 [...]"]
+[--option "OPTION VALUE [...]"]
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--config=DOMAIN>
+
+DOMAIN is the HADES measurement domain for which the configuration shall
+take place.
+
+=item B<--file CONFIGFILE>
+
+CONFIGFILE is a text file with configuration commands.
+
+=item B<--add "UNIT OPTION VALUE [OPTION2 VALUE2 ...]">
+
+UNIT is one of: "host", "interface", "route".
+It adds one entry of the respective type to the config file.
+
+OPTIONs are the same as for the corresponding config file commands.
+
+Valid options are:
+
+--add "host name NAME ip IP [log_path LOG_PATH] [bin_path BIN_PATH]
+[cfg_path CFG_PATH] [pid_path PID_PATH] [dat_path DAT_PATH]"
+
+If NAME is "default" the changes apply to the default host section. All
options
+of the default host apply to all hosts which do not have a different setting
for
+that option. Setting the ip for the default host is invalid.
+
+--add "interface name NAME host HOST [ip IP] [shortname SHORTNAME]"
+
+Adds a network interface for HADES measurements. The host must be defined
first.
+If no ip is given the ip of the host is used.
+
+--add "route [default] sender SENDER receiver RECEIVER [interval INTERVAL]
+[groupsize GROUPSIZE] [packetsize PACKETSIZE] [precedence PRECEDENCE]
+[packetinterval PACKETINTERVAL] [transmittime TRANSMITTIME]
+[verbose VERBOSE] [map MAP] [alert ALERT]"
+
+Adds a HADES measurement path (route) from interface SENDER to interface
+RECEIVER. The interfaces must be defined first using '--add "interface ..."'.
+
+If the 'default' keyword is present the default route options are set. These
+apply to all routes without explicitly set options. Setting sender or
receiver for
+the default route is invalid.
+
+=item B<--delete "UNIT OPTION VALUE [OPTION2 VALUE2 ...]">
+
+UNIT is one of: "host", "interface", "route".
+It deletes that entry of the respective type from the config file which
matches the
+set of OPTIONs.
+
+See the paragraph on "--add" for details.
+
+=item B<--ssh_args "host HOSTNAME SSH_ARG VALUE [SSH_ARG2 VALUE2 ...]">
+
+Changes the ssh_args section for host HOSTNAME in the config file.
+
+Valid options and their defaults are:
+
+user "" protocol 2
+
+If HOSTNAME is "default" the ssh_args section for the default host is
changed.
+
+=item B<--identity_files "host HOSTNAME FILE [FILE2 ...]">
+
+Adds ssh identity files (e.g. "~/.ssh/id_rsa") for use with host HOSTNAME.
+
+If HOSTNAME is "default" the identity_files section for the default host is
changed.
+
+=item B<--option "OPTION VALUE [OPTION2 VALUE2 ...]">
+
+Sets option OPTION in the config file to VALUE.
+
+Valid options and their defaults are:
+
+datadir BASEDIR/data bindir BASEDIR/bin wwwdir BASEDIR/www cfgsdir
BASEDIR/cfgs
+sleeptime 0 verbose 0 debug 0
+portbase 60000 portmax 65535 rsync_path /usr/bin/rsync rsync_rsh /usr/bin/ssh
+htmldocumentroot /hades documentroot /var/www/html/hades
+htmlplotcache /hades/plots plotcache /var/www/html/hades/plots
+max_cache_size 1e6 wui_tmpdir /tmp/hades_disp gnuplot /usr/bin/gnuplot
+
+where BASEDIR is
+
+=back
+
+=head1 CONFIGFILE
+
+Details on the config file:
+
+Example:
+
+ # This is a comment
+ #
+ # Set the "sleeptime" option to 42:
+ sleeptime 42
+ #
+ # Set options for the default host:
+ host name default dat_path /opt/data
+ #
+ # Add a new measurement host with all options set to the default host's
+ host name somehost ip 192.168.3.5
+ #
+ # This host has all option except 'dat_path' set to the default host's
+ host name somehost2 ip 192.168.3.7 dat_path /home/joe/data
+ #
+ # Yet another host, config can be split across lines
+ host name somehost3 ip 192.168.3.10
+ host name somehost3 log_path /dev/null
+ #
+ # Add a network interface
+ interface host somehost name somehost_if1 ip 192.168.3.6 shortname shost
+ #
+ # This host has 2 NICs
+ interface host somehost2 name somehost2_if1 ip 192.168.3.8 shortname shost2
+ interface host somehost2 name somehost2_if2 ip 192.168.3.9 shortname shost2b
+ #
+ # This interface will use its host's ip
+ interface host somehost3 name s3if
+ #
+ # All hosts have user 'jane' for ssh
+ ssh_args host default user jane
+ #
+ # except this one
+ ssh_args host somehost user joe
+ #
+ # jane's identity files
+ identity_files host default /home/jane/.ssh/id_rsa /home/jane/.ssh/id_dsa
+ #
+ # joe's identity files
+ identity_files host somehost /home/joe/.ssh/id_rsa
+ #
+ # All measurements use these values
+ route default interval 40000000 groupsize 19 packetsize 99
+ route default precedence 0x2
+ #
+ # Some measurements with default values
+ route sender s2if2 receiver somehost_if1
+ route sender s2if2 receiver s3if
+ # Measurements are unidirectional, if you need both directions you have to
say so
+ route sender s3if receiver s2if2
+ #
+ # Non-standard measurement
+ route sender s3if receiver s2if2 groupsize 5 precedence 0x10
+ #
+ # You can have more than 1 measurement between the same sender/receiver pair
+ # with different options
+ route sender s3if receiver s2if2 groupsize 5 precedence 0x20
+
+
+=head1 DESCRIPTION
+
+Convert HADES config files into perl module style files
Property changes on: trunk/build/HADES/bin/hadescfg.pl
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/etc/hades-analyzed.conf.example
===================================================================
--- trunk/build/HADES/etc/hades-analyzed.conf.example
(rev 0)
+++ trunk/build/HADES/etc/hades-analyzed.conf.example 2011-03-18 13:05:36
UTC (rev 692)
@@ -0,0 +1,307 @@
+#
+# Example configuration file for the Hades analyze daemon (hades-analyzed)
+#
+# All configuration options are set to their default values.
+# See 'man hades-analyzed' for more information.
+#
+
+
+#
+# detach - Detach from terminal.
+#
+# Detach from terminal, aka run in background (instead of foreground).
+# Log messages will not be sent to STDERR.
+#
+# Valid options: yes, on, 1, true, no, off, 0, false
+#
+# Default: on
+#
+# Example:
+# detach off
+#
+#detach on
+
+
+#
+# logfile - Name of log file.
+#
+# Append messages to the specified file.
+#
+# Set it to no, off, 0, or false to disable log file usage.
+# Set it to yes, on, 1, or true to enable logging to default log file
+# "/var/log/hades-analyzed.log".
+#
+# You can use this option together with "syslog" (see below).
+# Messages will then be written to both, log file and system log.
+#
+# Valid options: yes, on, 1, true, no, off, 0, false, path to log file
+#
+# Default: off
+#
+# Example:
+# logfile "/var/log/hades-analyzed.log"
+#
+#logfile off
+
+
+#
+# syslog - Whether messages should be written to system log.
+#
+# Set it to no, off, 0, or false to disable sending messages to system log.
+# Set it to yes, on, 1, or true to enable sending messages to system log.
+#
+# You can use this option together with "logfile" (see above).
+# Messages will then be written to both, log file and system log.
+#
+# Valid options: yes, on, 1, true, no, off, 0, false
+#
+# Default: off
+#
+# Example:
+# syslog on
+#
+#syslog off
+
+
+#
+# syslog-host - The (optional) host to which system log messages are
forwarded.
+#
+# If this option is set to a dns name or ip address, all system log messages
+# are forwarded to the specified remote host.
+# If set to no, off, 0, false, or "" logging is done locally.
+#
+# Valid options: no, off, 0, false, name or ip of syslog host
+#
+# Default: off
+#
+# Example:
+# syslog-host "syslog.nowhere.com"
+#
+#syslog-host off
+
+
+#
+# syslog-ident - Identification string for system log messages.
+#
+# This string will be prepended to all messages in the system log.
+#
+# Default: hades-analyzed
+#
+# Example:
+# syslog-ident "hades-analyzed"
+#
+#syslog-ident "hades-analyzed"
+
+
+#
+# syslog-facility - Type of program for system logging.
+#
+# This string will be used as the system log facility for messages sent to
+# the system log.
+# See your syslog documentation for the facilities available on your system.
+#
+# Valid options: (Typical facilities.)
+# auth, authpriv, cron, daemon, kern, local0 through local7, mail, news,
+# syslog, user, uucp
+#
+# Default: daemon
+#
+# Example:
+# syslog-facility "local0"
+#
+#syslog-facility "daemon"
+
+
+#
+# loglevel - The log level used for logging to syslog and to the log files.
+#
+# This option is used for setting the verbosity of the running daemon.
+# The log levels available are the log levels defined by Log::Dispatch.
+#
+# Valid options: (This is a list of values that should be accepted.)
+# 0 = debug
+# 1 = info
+# 2 = notice
+# 3 = warning
+# 4 = err = error
+# 5 = crit = critical
+# 6 = alert
+# 7 = emerg = emergency
+#
+# Default: notice
+#
+# Example:
+# loglevel "info"
+#
+#loglevel "notice"
+
+
+#
+# pidfile - Name of pid file.
+#
+# Set it to no, off, 0, or false to disable pid file usage.
+# Set it to yes, on, 1, or true to write pid file to default position
+# "/var/run/hades-analyzed.pid".
+#
+# Valid options: yes, on, 1, true, no, off, 0, false, path to pid file
+#
+# Default: /var/run/hades-analyzed.pid
+#
+# Example:
+# pidfile off
+#
+#pidfile "/var/run/hades-analyzed.pid"
+
+
+#
+# analyzer - Path to analyzer script.
+#
+# The analyzer script to run for doing the "real" work.
+# Can be an absolute or relative path. For relative paths the PATH
environment
+# variable is searched. For security reasons you normally avoid using
relative
+# paths.
+# NOTE: You cannot add command line options here! Only the path to the binary
+# (without white space)!
+#
+# Default: The script hades-analyzer.pl residing in the same directory as
+# hades-analyzed.pl is used as default.
+#
+# Example:
+# analyzer "/usr/local/bin/hades-analyzer.pl"
+#
+#analyzer "PATH_TO_ANALYZED/hades-analyzer.pl"
+
+
+#
+# domain - Configure domains that should be handled by analyzed.
+#
+# For every domain that should be handled by analyzed you should put one
+# domain line in the configuration file. The value is a string representing a
+# call to hades-analyzer. The first word in the line is the domain or the
+# Hades configuration file for the domain (parameter --config of
+# hades-analyzer.pl). The rest of the line is split on white space and used
as
+# parameters for hades-analyzer.pl as they are.
+#
+# There is no shell like quoting available at the moment. Parameters are
+# directly passed to IPC::Run via start().
+#
+# You normally add at least the parameters --updatedb and --map to write
+# meta data to the data base and call hades-mkmap.pl in order to generate the
+# weather maps.
+#
+# The output (STDOUT and STDERR) of the executed hades-analyzer.pl is parsed
+# and filtered to some extent. Some messages are just thrown away, other are
+# sent to the log with other loglevels. You can use --verbose and --debug
+# in the domain options to log more information, but the parsing is NOT
+# adjusted. You will e.g. not see the messages thrown away by the parsing and
+# the additional output will also not be handled in any way, just passed on!
+#
+# Do NOT use parameters used by analyzed itself! At the moment the following
+# parameters are used: --config, --year, --month, --day, --sleeptime, --gzip
+#
+# Default: One domain specified by the file hades.conf in the etc
subdirectory
+# in the directory below the directory hades-analyzed.pl is located.
+# Of course, no additional parameters are used.
+#
+# Example:
+# domain domain1 --updatedb --map
+# domain domain2 --updatedb --map --wwwdir=/tmp/save
+# domain domain2 --updatedb --map --current --status
+#
+#domain PATH_TO_ANALYZED/../etc/hades.conf
+
+#
+# timer-warn - Timeout for warning message about long running analyzers.
+#
+# If an hades-analyzer.pl for a domain is running longer than the specified
+# time in seconds, a warning message will be created.
+#
+# Default: 3600 (one hour)
+#
+# Example:
+# timer-warn 1800
+#
+#timer-warn 3600
+
+#
+# timer-kill - Timeout for killing long running analyzers.
+#
+# If an hades-analyzer.pl for a domain is running longer than the specified
+# time in seconds, it will get killed by first sending SIGTERM and
+# then SIGKILL.
+#
+# Default: 10800 (three hours)
+#
+# Example:
+# timer-kill 7200
+#
+#timer-kill 10800
+
+#
+# polling-sleeptime - Sleep time for analyzer polling.
+#
+# This variable allows you to set a time in seconds that will tell
+# hades-analyzed.pl how long to sleep between polling every running
+# hades-analyzer.pl. This value should not be too high, because it will slow
+# down hades-analyzed.pl significantly especially when a lot of domains are
+# configured. It will also lead to a significant delay till a finished
+# hades-analyzer.pl is started again.
+#
+# Default: 1
+#
+# Example:
+# polling-sleeptime 3
+#
+#polling-sleeptime 1
+
+#
+# max-polling-errors - Threshold before analyzer will be killed.
+#
+# If an hades-analyzer.pl cannot be polled for new output on STDOUT and
+# STDERR it is considered to hang. Instead of killing it at once, we keep on
+# trying the specified number of times. These retries are done in normal
+# polling order, so all other domains are polled, before the one that failed
+# is tried again. Nevertheless the polling takes place quite frequently
+# (depending also on polling-sleeptime, see above) and therefore
+# max-polling-errors does not need to be small to kill the hanging
+# hades-analyzer.pl fast enough.
+#
+# Default: 10
+#
+# Example:
+# max-polling-errors 5
+#
+#max-polling-errors 10
+
+#
+# retry-start-delay - Delay till retrying to restart a failing analyzer.
+#
+# If executing an hades-analyzer.pl fails, hades-analyzed.pl will not try
+# to start it again at once, but wait at least the specified time in seconds.
+# This prevents too many consecutive tries and the flood of error messages you
+# will get. Normally an error condition is not going away fast.
+#
+# Default: 600 (ten minutes)
+#
+# Example:
+# retry-start-delay 300
+#
+#retry-start-delay 600
+
+#
+# day-begin-threshold - Time to wait before a new day begins.
+#
+# It is not necessary that all system clocks involved in Hades measurements
are
+# hardware synchronised. This makes a proper day change difficult. Therefore
+# hades-analyzed.pl has the possibility to set a threshold in seconds that
+# delays the transition to a new day (if necessary) so that the final
analysis
+# of yesterday is started not earlier than day-begin-threshold seconds after
+# midnight.
+#
+# Default: 60 (one minute)
+#
+# Example:
+# day-begin-threshold 120
+#
+#day-begin-threshold 60
+
Added: trunk/build/HADES/etc/hades-analyzed.init
===================================================================
--- trunk/build/HADES/etc/hades-analyzed.init (rev
0)
+++ trunk/build/HADES/etc/hades-analyzed.init 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,123 @@
+#!/bin/bash
+#
+# Init file for the Hades data retrieval and analyzer daemon
+#
+# chkconfig: 2345 98 02
+# description: Hades analyzer daemon
+#
+# processname: hades-analyzed
+# config: /etc/hades/analyzed.conf
+# pidfile: /var/run/hades-analyzed.pid
+
+# source function library
+. /etc/rc.d/init.d/functions
+
+# Defaults for configuration options
+HADES_ANALYZED=/usr/sbin/hades-analyzed
+CONF_FILE=/etc/hades/analyzed.conf
+PID_FILE=/var/run/hades-analyzed.pid
+LOG_FILE=""
+USER=hades
+GROUP=hades
+OPTIONS=""
+
+# source sysconfig settings
+sysconfig=/etc/sysconfig/hades-analyzed
+[ -f $sysconfig ] && . $sysconfig
+
+# In order to help keeping the code from hades-analyzed.init,
+# hades-traceroute.init, and oppd.init in sync, we use the following
+# helper variables:
+PROG=$HADES_ANALYZED
+prog="hades-analyzed"
+
# Check that the user exists (if we set a user) and obtain its uid/gid.
if [ -n "$USER" ] ; then
    if getent passwd | grep -q "^$USER:"; then
        # Obtain the uid and gid
        USERUID=$(getent passwd | awk -F: -v u="$USER" '$1 == u {print $3}')
        USERGID=$(getent passwd | awk -F: -v u="$USER" '$1 == u {print $4}')
    else
        # BUG FIX: $NAME was never defined anywhere in this script; the
        # service name is kept in $prog.
        echo "The user $USER, required to run $prog, does not exist." 1>&2
        exit 1
    fi
fi
+
+RETVAL=0
+
# Start the analyzer daemon: prepare the pid file (and optional log file)
# with the right ownership, then launch the daemon via the RH "daemon"
# helper from /etc/rc.d/init.d/functions.
start()
{
    echo -n $"Starting $prog: "
    touch "$PID_FILE"
    chown $USER:$GROUP "$PID_FILE"
    if [ -n "$LOG_FILE" ]; then
        touch $LOG_FILE
        chown $USER:$GROUP $LOG_FILE
        OPTIONS="$OPTIONS --logfile=$LOG_FILE"
    fi
    daemon --user=$USER --pidfile="$PID_FILE" \
        $PROG --config="$CONF_FILE" --pidfile="$PID_FILE" $OPTIONS \
        && success || failure
    RETVAL=$?
    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/$prog
    echo
}
+
# Stop the daemon. We wait a bit longer (-d 10) before issuing a SIGKILL,
# because there may be forked analyzer processes to wait for.
#TODO Perhaps we should kill the forked analyzer if analyzed has problems
#     with it? See the sshd init script for an approach that might work.
stop()
{
    echo -n $"Stopping $prog: "
    killproc -p "$PID_FILE" -d 10 $PROG
    RETVAL=$?
    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/$prog
    # The daemon should remove its own pid file, but it may lack the
    # permission to do so -- clean it up here as well.
    rm -f $PID_FILE
    echo
}
+
# Ask the daemon to reload its configuration by sending SIGHUP.
reload()
{
    echo -n $"Reloading $prog: "
    killproc -p "$PID_FILE" $PROG -HUP
    RETVAL=$?
    echo
}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ reload)
+ reload
+ ;;
+ condrestart)
+ if [ -f /var/lock/subsys/$prog ] ; then
+ stop
+ start
+ fi
+ ;;
+ status)
+ status -p "$PID_FILE" $PROG
+ #TODO This will more or less show you all running processes with the
+ # name $PROG! Is this a bug in /etc/rc.d/init.d/functions ?
+ RETVAL=$?
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|restart|reload|condrestart|status}"
+ RETVAL=1
+esac
+exit $RETVAL
Property changes on: trunk/build/HADES/etc/hades-analyzed.init
___________________________________________________________________
Name: svn:executable
+ *
Added: trunk/build/HADES/etc/hades-analyzed.sysconfig
===================================================================
--- trunk/build/HADES/etc/hades-analyzed.sysconfig
(rev 0)
+++ trunk/build/HADES/etc/hades-analyzed.sysconfig 2011-03-18 13:05:36
UTC (rev 692)
@@ -0,0 +1,30 @@
+# Path to hades-analyzed.
+#HADES_ANALYZED=/usr/sbin/hades-analyzed
+
+# Configuration file to be used by hades-analyzed.
+#CONF_FILE=/etc/hades/analyzed.conf
+
+# pid file to use. The init script overwrites the pid file used (from
+# configuration file or default) to make sure that the init script can
control
+# the daemon via the pid file. Therefore you can change the pid file ONLY
here!
+#PID_FILE=/var/run/hades-analyzed.pid
+
+# log file to use. If you set a log file here, it will not only be used as
+# the daemon's log file (overriding the value from the configuration file);
+# the init script will also take care of the ownership of the file, based on
+# the USER and GROUP variables you can find below.
+#LOG_FILE=""
+
+# User to run the daemon as. Files (especially the pid file and log file, see
+# above) created by the daemon will be owned by this user.
+# For hades-analyzed this is also important because of various SSH related
+# issues!
+#USER=hades
+
+# Group id used for pid file and log file (see above).
+#GROUP=hades
+
+# Additional command line arguments (overriding options from configuration
+# file!).
+#OPTIONS=""
+
Added: trunk/build/HADES/etc/hades-example.conf
===================================================================
--- trunk/build/HADES/etc/hades-example.conf (rev
0)
+++ trunk/build/HADES/etc/hades-example.conf 2011-03-18 13:05:36 UTC (rev
692)
@@ -0,0 +1,304 @@
+# First inform the most important editors that this file contains Perl code.
+# Order of appearance is not representing any ranking! Or is it?...
+# Hi, vim:syntax=perl
+# Hi, Emacs -*-Mode: perl-*-
+
+#TODO (for template.conf)
+# - Correlate variables to command line options.
+
+# Enable warnings and strict, but since this "script" is incomplete don't
warn
+# about everything:
+use warnings;
+no warnings "once";
+use strict;
+no strict "vars";
+
+#
+# All available variables are listed here in this file with the default
values
+# set.
+# Remember: Everything is Perl code, that is executed during runtime!
+#
+
+#
+# $verbose and $debug control the amount of information the Hades tools print
+# out when running. Both set to false, leads to very little progress
+# information. Lots of tools will not even print anything, because they are
+# intended to run as cron job or daemon. With $verbose set, basic information
+# is provided, whereas $debug leads to a very chatty output for most tools,
+# that are really only relevant for debugging.
+# Important: Setting $debug does not automatically set $verbose!
+#
+$verbose= 0;
+$debug = 0;
+
+#
+# $domain is a very important parameter that helps in running different Hades
+# instances on the same machines. E.g. the DFN and GEANT networks can share
+# the same server and even some of the measurement boxes by using two
+# different domains "win" and "geant".
+#
+$domain = "example";
+
+#@codomains = qw(example2 example3);
+
+#
+# The following variables tell Hades where to look for the necessary files.
+# $bindir is pointing to the directory where the Hades framework scripts
are
+# located.
+# $datadir is the directory containing the raw data files.
+# $wwwdir is the directory containing the aggregated data files.
+# $cfgsdir is the directory where the generated configuration files for the
+# measurement boxes will be kept, making it possible to copy them
+# to the boxes.
+# The following examples use the variable $basedir, that contains the base
+# directory of the Hades framework installation. The default for this
directory
+# is determined using Perl's internal mechanisms.
+#
+#$datadir = "$basedir/data";
+#$bindir = "$basedir/bin";
+#$wwwdir = "$basedir/www";
+#$cfgsdir = "$basedir/cfgs";
+$bindir = "$basedir/bin";
+$datadir = "/data/hades/$domain/data";
+$wwwdir = "/data/hades/$domain/www";
+$cfgsdir = "/data/hades/$domain/cfgs";
+
+#
+# $sleeptime is normally only used in analyzer.pl. It specifies the
+# sleep time in seconds after processing every host.
+#
+#$sleeptime = 0;
+$sleeptime = 0;
+
+#
+# $portbase and $portmax set the lowest and highest port that are available
for
+# the Hades measurements.
+#
+$portbase = 50000;
+$portmax = 59999;
+
+#
+# The variables $rsync_path and $rsync_rsh tell Hades the path to the rsync
+# binary and the content of the RSYNC_RSH environment variable to use for
+# syncing the configuration files to the measurement boxes.
+#
+$rsync_path = "/usr/bin/rsync";
+$rsync_rsh = "/usr/bin/ssh";
+
+#
+# The variable $metadb_service denotes the service name (from
pg_service.conf;
+# see http://www.postgresql.org/docs/8.2/static/libpq-pgservice.html or
similar
+# for further information) that should be used as meta data database.
+#
+$metadb_service = "hades";
+
+#
+# The following variables are used for configuring the web user interface.
+# TODO add more docs!
+#
+$htmldocumentroot = "/hades";
+$documentroot = "/opt/hades/html/";
+$htmlplotcache = "$htmldocumentroot/plots/$domain";
+$plotcache = "$documentroot/plots/$domain";
+$htmlmapdir = "$htmldocumentroot/maps/$domain";
+$mapdir = "$documentroot/maps/$domain";
+$max_cache_size = 1e6;
+$wui_tmpdir = "/tmp/hades_$domain";
+$ploticus = "/data/ploticus/pl/src/pl";
+$gnuplot = "/usr/bin/gnuplot";
+
+#
+# The weather map creation requires a bunch of settings. Every map that
should
+# be created gets its own hash in the @map.
+# TODO add more docs!
+#
+# Example:
+#
+#@maps
= (
+# {
+# name => "overview",
+# type => "coords",
+# image => "map_$domain.png",
+# imagemap => ".imagemap_$domain.dat",
+# width => 1200,
+# height => 800,
+# max_owd => 0.02,
+# background => "opaque",
+# legend_names_y => 20,
+# legend_names_x => 820,
+# legend_names_colums => 2,
+# },
+# {
+# name => "backbone",
+# type => "circle",
+# interfaces => [
+# "Host1_I1", "Host2_I3", #....
+# ],
+# image => "map_${domain}2.png",
+# imagemap => ".imagemap_${domain}2.dat",
+# width => 1000,
+# height => 800,
+# center_x => 370,
+# center_y => 370,
+# radius_x => 340,
+# radius_y => 340,
+# max_owd => 0.1,
+# background => "transparent",
+# legend_names_y => 70,
+# legend_names_x => 780,
+# legend_names_colums => 1,
+# },
+#);
+#
+@maps
= (
+ {
+ type => "coords",
+ mtype => "median",
+ image => "map_$domain.png",
+ imagemap => ".imagemap_$domain.dat",
+ width => 1200,
+ height => 700,
+ max_owd => 0.02,
+ background => "opaque",
+ legend_names_y => 20,
+ legend_names_x => 820,
+ legend_names_colums => 2,
+ shorts_length => 1,
+ },
+ {
+ type => "coords",
+ mtype => "loss",
+ image => "map_${domain}_loss.png",
+ imagemap => ".imagemap_${domain}_loss.dat",
+ width => 1200,
+ height => 800,
+ max_owd => 0.02,
+ background => "opaque",
+ legend_names_y => 20,
+ legend_names_x => 820,
+ legend_names_colums => 2,
+ shorts_length => 1,
+ },
+ {
+ name => "star",
+ type => "star",
+ mtype => "median",
+ interfaces => [],
+ hide_unrelated => 0,
+ image => "map_star_*.png",
+ imagemap => ".imagemap_star_*.dat",
+ width => 1200,
+ height => 800,
+ center_x => 380,
+ center_y => 370,
+ radius_x => 340,
+ radius_y => 340,
+ max_owd => 0.02,
+ background => "transparent",
+ legend_names_y => 10,
+ legend_names_x => 780,
+ legend_names_colums => 2,
+ shorts_length => 1,
+ },
+);
+
+#
+# The default options for the hosts in %hosts.
+# "ssh_args" are necessary to connect to the hosts. The *_path variables set
+# important paths that are used on the measurement boxes.
+# The other variables like "ip" and "interfaces" _must_ be set for every
entry
+# in %hosts. See below in the description for %hosts.
+# Important: "ssh-args" are completely overwritten when
+# set in the config file. TODO Is this really as it should be?
+# protocol version 2 is important! You should not change it...
+#
+#
+# identity file "/var/www/.ssh/id_rsa" is for "web user"
+#
+%hosts_default = (
+ ssh_args => {
+ user => "labor",
+ identity_files => [
+ "/data/hades/.ssh/identity",
+ ],
+ protocol => 2,
+ },
+ bin_path => "/usr/local/bin",
+ log_path => "/var/log/hades/$domain",
+ pid_path => "/var/run/hades/$domain",
+ cfg_path => "/etc/hades/$domain",
+ dat_path => "/data/hades/$domain",
+);
+
+#
+# %hosts tells Hades which hosts are available and which interfaces they
+# provide.
+# Every host _must_ have an "ip" and at least one "interface"
+#
+# Example:
+#
+# %hosts = (
+# "Host1" => {
+# ip => "1.2.3.4",
+# interfaces => {
+# "Host1_Interface1" => {
+# shortname => "H1_I1"
+# },
+# "Host1_Interface2" => {
+# shortname => "H1_I2"
+# },
+# }
+# },
+# );
+#
+
+
+#
+# The default options for the routes in %routes.
+# Note: All intervals are in nano-secs!
+#
+# interval Interval between sending two groups.
+# groupsize Number of packets in one group.
+# packetsize Size of one packet.
+# precedence ToS bit.
+# packetinterval Interval between sending two packets.
+# transmittime The estimated OWD of a typical packet to have a threshold
for
+# the receiver's waiting time.
+# verbose Verbose flag for sender and receiver operations.
+# map Is this route part of the map?
+# alert Should there be alerts on this route?
+#
+%routes_default = (
+ "interval" => 60000000,
+ "groupsize" => 9,
+ "packetsize" => 41,
+ "precedence" => "0x0",
+ "packetinterval" => 10000,
+ "transmittime" => 50000,
+ "verbose" => 0,
+ "map" => 0,
+ "alert" => 0,
+);
+
+#
+# %routes tells Hades which measurements should be started between the hosts.
+#
+# Example:
+#
+# %routes = (
+# "Host1_Interface1" => {
+# "Host2_Interface1" => [
+# {
+# },
+# ],
+# "Host2_Interface2" => [
+# {
+# packetsize => 1472,
+# },
+# ],
+# },
+# );
+#
+
+
+1; # Last statement must evaluate to "true"
Added: trunk/build/HADES/lib/Hades.pm
===================================================================
--- trunk/build/HADES/lib/Hades.pm (rev 0)
+++ trunk/build/HADES/lib/Hades.pm 2011-03-18 13:05:36 UTC (rev 692)
@@ -0,0 +1,496 @@
+package Hades;
+
+#TODO
+# - remove obsolete &equal_tracerts and &join_tracerts, when possible
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::RealBin/../lib";
+
+# Locale stuff
+use locale;
+use POSIX qw(locale_h);
+use DateTime;
+use DateTime::Locale;
+BEGIN {
+ if (DateTime::Locale->load(setlocale(LC_TIME))) {
+ DateTime->DefaultLocale(setlocale(LC_TIME));
+ }
+}
+
+#DEBUG
+use Data::Dumper;
+#DEBUG
+
+use Carp;
+
+use File::Path;
+use File::Basename;
+use Socket;
+
+use Hades::Config;
+use Hades::I18N;
+
+
+
+BEGIN {
+ use Exporter ();
+ our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS);
+
+ # set the version for version checking
+ $VERSION = "0.01";
+ # if using RCS/CVS, this may be preferred
+ #$VERSION = sprintf "%d.%03d", q$Revision: 1.1 $ =~ /(\d+)/g;
+
+ @ISA = qw(Exporter);
+ @EXPORT = qw(
+ &create_config %config $config &init_config
+ &get_hosts &get_routes &get_interfaces &if2host &hostid
+ $SSH_ERR_TYPE $SSH_ERR_MSG ssh_err_type ssh_err_msg
+ &ssh_ip &ssh_cmd &ssh_cp &ssh_put2dir &ssh_put2file
+ &mkDir &name
+ $i18n &pmt &mt
+ &Dumper_html
+ &equal_tracerts &join_tracerts
+ );
+ %EXPORT_TAGS = (
+ # eg: TAG => [ qw!name1 name2! ],
+ );
+ @EXPORT_OK = qw();
+
+}
+our @EXPORT_OK;
+
+
+( our $realbindir = $FindBin::RealBin ) =~ s{/*$}{};
+#( our $basedir = $realbindir ) =~ s{/(cgi-)?bin$}{};
+( our $basedir = $realbindir ) =~ s{/(bin|cgi-.+|tools)$}{}; #TODO ".." or
"catdir" or ....
+
+our $config = undef; # The Hades::Config object for "global" configuration
+our %config = (); # The old style "config hash"
+ # IMPORTANT: Copy of $config->{config}. Modify with care!
+our $configfile = undef; # Change before calling create_config, if you like
+
+our ($SSH_ERR_TYPE,$SSH_ERR_MSG) = (0,"");
+ # SSH_ERR_TYPE: 0 - no error
+ # 1 - initialisation error
+ # 2 - login error
+ # 3 - execution error
+
+
# I18N: determine the language handle once at module load time.
our $i18n = Hades::I18N->get_handle()
    or croak "Could not determine language!";

# Convenience wrappers: pmt() prints a translated message, mt() returns it.
sub pmt { print($i18n->maketext(@_)) }
sub mt  { return $i18n->maketext(@_) }
+
+
#DEBUG
# Debug helper: return Data::Dumper output with every newline replaced by
# "<br>" so the dump can be embedded in HTML pages.
sub Dumper_html {
    my $dump = Dumper(@_);
    $dump =~ s/\n/<br>/g;
    return $dump;
}
#/DEBUG
+
+
# IP -> DNS name: reverse-resolve an IPv4 address via gethostbyaddr().
# Returns undef if the address cannot be packed or has no PTR record.
sub name {
    my ($ip) = @_;
    my $packed = inet_aton($ip);
    return undef unless defined $packed;    # TODO Not working for IPv6 !!!
    my ($hostname) = gethostbyaddr($packed, AF_INET);
    return $hostname;
}
+
+
#
# Functions for the "global" configuration
#

# Build the global Hades::Config object from $configfile (honouring
# @ARGV), refresh the legacy %config hash from it and return that hash.
sub create_config {
    $config = Hades::Config->new(configfile => $configfile, use_argv => 1);
    $config->init(@_);
    %config = %{ $config->{config} };
    return %config;
}

# Thin wrappers delegating to the global Hades::Config object.
sub get_hosts      { return $config->get_hosts() }
sub get_routes     { return $config->get_routes() }
sub get_interfaces { return $config->get_interfaces() }
sub hostid         { return $config->hostid(@_) }
sub if2host        { return $config->if2host(@_) }
+
# Extract the IP address to use for SSH from a host hash.
# Only dotted-quad IPv4 addresses are accepted; anything else yields an
# empty list (undef in scalar context).
sub ssh_ip {
    my ($host_hash) = @_;

    my $addr = $host_hash->{ssh_args}->{ip};
    return unless defined $addr && $addr =~ /^\d+\.\d+\.\d+\.\d+$/;
    return $addr;
}
+
+
# Map a numeric SSH error type (see $SSH_ERR_TYPE) to a human readable
# description. Unknown types yield an empty list (undef in scalar context).
sub ssh_err_type {
    my ($type) = @_;
    foreach my $entry (
        [ 0 => "no error"             ],
        [ 1 => "initialisation error" ],
        [ 2 => "login error"          ],
        [ 3 => "execution error"      ],
    ) {
        return $entry->[1] if $type == $entry->[0];
    }
    return;
}
+
# Compose a one-line ssh error report from the global error state
# ($SSH_ERR_TYPE / $SSH_ERR_MSG).
sub ssh_err_msg {
    my $type_text = ssh_err_type($SSH_ERR_TYPE);
    return sprintf "ssh error (%s): %s", $type_text, $SSH_ERR_MSG;
}
+
# Record an ssh error in the global error state, stripping the
# " at FILE line N" suffix Perl appends to die() messages.
sub ssh_set_err {
    my ($type, $msg) = @_;
    $msg =~ s/ at \/.+ \d.+$//;
    # BUG FIX: the error type passed by the caller was ignored and
    # hard-coded to 1 (initialisation error), so login (2) and execution
    # (3) errors were misreported by ssh_err_msg().
    ($SSH_ERR_TYPE, $SSH_ERR_MSG) = ($type, $msg);
}
+
# Clear the global ssh error state. Always returns an empty list.
sub ssh_reset_err {
    $SSH_ERR_TYPE = 0;
    $SSH_ERR_MSG  = "";
    return;
}
+
# Build a cache key identifying an ssh connection: the target IP followed
# by all ssh arguments, sorted by name for a stable ordering.
sub get_ssh_connection_key {
    my ($ip, $ssh_args) = @_;
    my $args_part = join ';',
        map { $_ . '=' . $ssh_args->{$_} } sort keys %{$ssh_args};
    return "ip=$ip;" . $args_part;
    #TODO is this adequate?
}
+
my %ssh_connections = ();

# Return a cached or freshly created (and logged in) Net::SSH::Perl
# connection for the given host hash, with optional extra ssh arguments.
# On failure the error is recorded via ssh_set_err() and undef is
# returned; the failed result is cached too, so we do not retry on every
# call (this mirrors the original behaviour).
sub get_ssh_connection {
    my $host_hash = shift;
    my $ssh_args  = shift || {};    # "accept" undef
    my %ssh_args  = (
        debug => $config{debug}, %{ $host_hash->{ssh_args} }, %{$ssh_args},
    );

    require Net::SSH::Perl;

    ssh_reset_err();

    my $ip = ssh_ip($host_hash);
    unless (defined $ip) {
        ssh_set_err(1, "Cannot determine IP address for " . $host_hash->{hostid});
        return;
    }
    my $key = get_ssh_connection_key($ip, \%ssh_args);
    return $ssh_connections{$key} if exists $ssh_connections{$key};

    my $ssh_connection = undef;
    eval {
        local $SIG{ALRM} = sub { die "SSH timeout!\n" };
        alarm 60;
        eval {
            $ssh_connection = Net::SSH::Perl->new($ip, %ssh_args);
        };
        unless ($@ eq "" && defined($ssh_connection)) {
            # BUG FIX: clear the alarm before bailing out; previously the
            # pending SIGALRM could fire after the local handler went out
            # of scope and kill the whole process.
            alarm 0;
            ssh_set_err(1, $@ || "Unknown error");
            return;
        }
        eval {
            $ssh_connection->login();
        };
        unless ($@ eq "") {
            alarm 0;    # BUG FIX: see above
            ssh_set_err(2, $@);
            # BUG FIX: do not hand out (and cache) a connection whose
            # login failed -- callers treat a defined value as usable.
            $ssh_connection = undef;
            return;
        }
        alarm 0;
    };
    $ssh_connections{$key} = $ssh_connection;
    return $ssh_connection;
}
+
# Execute $cmd on the remote host described by $host_hash.
# This subroutine behaves differently depending on calling context:
# in list context it returns (stdout, stderr, exit status); in scalar
# context it prints the captured output and returns the exit status.
# On error ssh_set_err() is called and undef values are returned.
sub ssh_cmd {
    my ($host_hash, $ssh_args, $cmd) = @_;

    require Net::SSH::Perl;

    ssh_reset_err();

    my $ssh = get_ssh_connection($host_hash, $ssh_args);
    return unless $ssh;    # error handling in get_ssh_connection()

    my ($out, $err, $exit);
    eval {
        local $SIG{ALRM} = sub { die "SSH command timeout!\n" };
        alarm 60;
        eval { ($out, $err, $exit) = $ssh->cmd($cmd) };
        alarm 0;
        if ($@) {
            ssh_set_err(3, $@);
            return;
        }
    };
    if ($@) {
        ssh_set_err(3, $@);
        return;
    }
    return ($out, $err, $exit) if wantarray;
    print $out if $out;
    print "###STDERR\n$err###STDERR END\n" if $err;
    return $exit;
}
+
+
my %sftp_connections = ();

# Return a cached or freshly created Net::SFTP connection for the given
# host hash, with optional extra ssh arguments. On failure the error is
# recorded via ssh_set_err() and undef is returned; the failed result is
# cached too, so we do not retry on every call (mirrors the original
# behaviour).
sub get_sftp_connection {
    my $host_hash = shift;
    my $ssh_args  = shift || {};    # "accept" undef
    my %ssh_args  = (
        debug => $config{debug}, %{ $host_hash->{ssh_args} }, %{$ssh_args},
    );

    require Net::SFTP;

    ssh_reset_err();

    my $ip = ssh_ip($host_hash);
    unless (defined $ip) {
        ssh_set_err(1, "Cannot determine IP address for " . $host_hash->{hostid});
        return;
    }
    my $key = get_ssh_connection_key($ip, \%ssh_args);
    return $sftp_connections{$key} if exists $sftp_connections{$key};

    my $sftp_connection = undef;
    eval {
        local $SIG{ALRM} = sub { die "SSH timeout!\n" };
        alarm 60;
        eval {
            $sftp_connection = Net::SFTP->new($ip, 'ssh_args' => [%ssh_args]);
        };
        unless ($@ eq "" && defined($sftp_connection)) {
            # BUG FIX: clear the alarm before bailing out; previously the
            # pending SIGALRM could fire after the local handler went out
            # of scope and kill the whole process.
            alarm 0;
            ssh_set_err(2, $@ || "Unknown error");
            return;
        }
        alarm 0;
    };
    $sftp_connections{$key} = $sftp_connection;
    return $sftp_connection;
}
+
+
# Try to mimic "cp" to a remote host. Very minimal...
# If $dir does not exist remotely and exactly one file is given, $dir is
# treated as the target file name; otherwise the directory is created and
# all files are copied into it.
# Returns 1 on success, empty list on error (details via ssh_set_err).
sub ssh_cp {
    my ($host_hash, $ssh_args, $dir, @files) = @_;

    require Net::SFTP;

    ssh_reset_err();

    my $sftp = get_sftp_connection($host_hash, $ssh_args);
    return unless $sftp;    # error handling in get_sftp_connection()

    eval {
        local $SIG{ALRM} = sub { die "SSH command timeout!\n" };
        alarm 60;
        eval {    # TODO more atomic eval around every command?
            unless ( $sftp->do_opendir($dir) ) {    # TODO disable warning !!!
                # No target dir.
                if ($#files == 0) {
                    # Only one file to copy! Treat $dir as target filename.
                    # TODO Create/check path to filename
                    $sftp->put($files[0], $dir);
                    # NOTE(review): this "return" only leaves the inner
                    # eval, not the sub -- the sub still returns 1 below.
                    return 1;
                }
                # Try to create the dir.
                # TODO mkpath ?....
                # NOTE(review): on mkdir failure this also only leaves the
                # inner eval, so the sub still returns 1 -- confirm intended.
                $sftp->do_mkdir($dir) or return undef;
            }
            foreach my $file (@files) {
                $sftp->put($file, "$dir/" . basename($file));
            }
        };
        alarm 0;
        if ($@) {
            ssh_set_err(3, $@);
            return;
        }
    };
    if ($@) {
        ssh_set_err(3, $@);
        return;
    }
    return 1;
}
+
# Copy files into a directory on the remote host. The directory has to
# exist already! Returns 1 on success, empty list on error (details are
# recorded via ssh_set_err).
sub ssh_put2dir {
    my ($host_hash, $ssh_args, $dir, @files) = @_;

    my $sftp = get_sftp_connection($host_hash, $ssh_args);
    return unless $sftp;    # error handling in get_sftp_connection()

    eval {
        local $SIG{ALRM} = sub { die "SSH command timeout!\n" };
        alarm 60;
        eval {
            $sftp->put($_, "$dir/" . basename($_)) for @files;
        };
        alarm 0;
        if ($@) {
            ssh_set_err(3, $@);
            return;
        }
    };
    if ($@) {
        ssh_set_err(3, $@);
        return;
    }
    return 1;
}
+
+# Copy one file to the remote host, using the given remote filename
sub ssh_put2file {
    my ($host_hash, $ssh_args, $target, $source) = @_;

    # Copy a single local file ($source) to the remote host, storing it
    # under the given remote file name ($target). Returns 1 on success,
    # empty list on error (details via ssh_set_err).
    my $sftp = get_sftp_connection($host_hash, $ssh_args);
    return unless $sftp;    # error handling in get_sftp_connection()

    eval {
        local $SIG{ALRM} = sub { die "SSH command timeout!\n" };
        alarm 60;
        eval { $sftp->put($source, $target) };
        alarm 0;
        if ($@) {
            ssh_set_err(3, $@);
            return;
        }
    };
    if ($@) {
        ssh_set_err(3, $@);
        return;
    }
    return 1;
}
+
+
# Ensure that a directory exists, creating intermediate path components
# as needed (via File::Path::mkpath). Croaks if no directory argument is
# given, if the path exists but is not a directory, or if it cannot be
# created.
sub mkDir {
    my ($dir) = @_;
    croak "Internal error: mkDir needs directory as first argument"
        unless $dir;
    if (-e $dir) {
        if (!-d $dir) {
            croak "$dir exists and is not a directory!\n";
        }
    }
    else {
        mkpath("$dir") or croak "Cannot mkdir ${dir}: $!\n";
    }
}
+
+
#
# Subs for data crunching
#
#TODO Obsolete - Remove when everything migrated to Hades::Data

# Compare two traceroute paths (array refs of hop hashes with "name" and
# "ip" keys). Returns 1 if both have the same length and every hop
# matches (two undef hops count as equal), 0 otherwise.
sub equal_tracerts {
    my ($first, $second) = @_;

    return 0 unless @$first == @$second;
    for (my $i = 0; $i <= $#{$first}; $i++) {
        if ( !defined($first->[$i]) || !defined($second->[$i]) ) {
            next if !defined($first->[$i]) && !defined($second->[$i]);
            return 0;
        }
        return 0 if $first->[$i]->{name} ne $second->[$i]->{name}
            || $first->[$i]->{ip} ne $second->[$i]->{ip};
    }
    return 1;
}


#TODO Obsolete - Remove when everything migrated to Hades::Data
# Merge several [header, timeline, tracerts] data sets for the same
# source/destination pair into one: the header dates are concatenated,
# identical tracerts are reused, and the timeline "ref" indices are
# remapped accordingly. Data sets with a different source or dest than
# the first one are skipped. Returns the merged triple as an array ref.
# WARNING: Source data sets get destroyed!!!
sub join_tracerts {
    # Take first data set as starting point
    my ($result_header, $result_timeline, $result_tracerts) = @{shift()};
    foreach (@_) {
        # BUG FIX: this used to be "next if undef;", which is a no-op
        # (undef is always false), so an undef data set crashed the
        # dereference below instead of being skipped.
        next unless defined $_;
        my ($header, $timeline, $tracerts) = @{$_};
        #
        # join headers
        #
        if ($result_header->{source} ne $header->{source}) {
            # TODO err message???
            next;
        }
        if ($result_header->{dest} ne $header->{dest}) {
            # TODO err message???
            next;
        }
        $result_header->{date} .= " $header->{date}";
        #
        # join routes
        #
        my @tracert_map = ();
        for (my $i = 0; $i <= $#{$tracerts}; $i++) {
            for (my $j = 0; $j <= $#{$result_tracerts}; $j++) {
                if ( equal_tracerts($result_tracerts->[$j], $tracerts->[$i]) ) {
                    $tracert_map[$i] = $j;    # reuse already available tracert
                }
            }
            unless (exists $tracert_map[$i]) {
                # new tracert -> generate new entry
                push @$result_tracerts, $tracerts->[$i];
                $tracert_map[$i] = $#{$result_tracerts};
            }
        }
        #
        # join timelines
        #
        # note: Last entry in @timeline only has the "end" date
        #       for previous interval
        #TODO Determine a sensible "gluing allowed" threshold for this???
        #TODO Sort the days beforehand???????? And look closely at the gaps!
        #delete $result_timeline->[-1];
        for (my $i = 0; $i <= $#{$timeline}; $i++) {
            if (defined $timeline->[$i]->{ref}) {
                $timeline->[$i]->{ref} = $tracert_map[ $timeline->[$i]->{ref} ];
            } elsif ($i < $#{$timeline}) {
                #TODO err message???
                next;
            }
            push @$result_timeline, $timeline->[$i];
            #TODO Glue the timelines together properly!!!
            #  $timeline->[$i]->{time} . "-";
            #  $timeline->[$i+1]->{time};
        }
    }
    return [ $result_header, $result_timeline, $result_tracerts ];
}
+
+
+
+return 1;
- [pS-dev] [GEANT/SA2/SA2T3-OPPD] r692 - in trunk/build: . HADES HADES/_build HADES/bin HADES/etc HADES/lib, svn-noreply, 03/18/2011
Archive powered by MHonArc 2.6.16.