@indigo423
Created February 14, 2017 09:04
Full config diff between OpenNMS Horizon 18.0.4-1 and 19.0.0-1
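
For reference, a diff like this can be regenerated by comparing the pristine etc/ trees shipped with the two releases. Below is a minimal Python sketch; the two directory paths are placeholder unpack locations, not taken from this gist, and it emits plain unified diffs rather than git-style headers.

    #!/usr/bin/env python3
    """Emit a unified diff between two OpenNMS etc/ trees."""
    import difflib
    import os
    import sys

    def diff_trees(old_root, new_root):
        for dirpath, _dirs, files in os.walk(old_root):
            for name in sorted(files):
                old_path = os.path.join(dirpath, name)
                rel = os.path.relpath(old_path, old_root)
                new_path = os.path.join(new_root, rel)
                if not os.path.exists(new_path):
                    continue  # files dropped in the newer release are skipped here
                with open(old_path, errors="replace") as f:
                    old_lines = f.readlines()
                with open(new_path, errors="replace") as f:
                    new_lines = f.readlines()
                sys.stdout.writelines(difflib.unified_diff(
                    old_lines, new_lines, fromfile="a/" + rel, tofile="b/" + rel))

    if __name__ == "__main__":
        diff_trees("horizon-18.0.4-1/etc", "horizon-19.0.0-1/etc")
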
diff --git a/all.policy b/all.policy
index 751b326..7585f22 100644
--- a/all.policy
+++ b/all.policy
@@ -19,4 +19,4 @@
grant {
permission java.security.AllPermission;
-};
\ No newline at end of file
+};
diff --git a/collectd-configuration.xml b/collectd-configuration.xml
index a43ac1e..30a8bf2 100644
--- a/collectd-configuration.xml
+++ b/collectd-configuration.xml
@@ -3,37 +3,29 @@
<collectd-configuration
threads="50">
- <package name="cassandra21x">
- <filter><![CDATA[(IPADDR != '0.0.0.0') & (categoryName == 'Cassandra21x')]]></filter>
+ <package name="cassandra-via-jmx">
+ <filter>IPADDR != '0.0.0.0'</filter>
<service name="JMX-Cassandra" interval="300000" user-defined="false" status="on">
<parameter key="port" value="7199"/>
<parameter key="retry" value="2"/>
<parameter key="timeout" value="3000"/>
<parameter key="protocol" value="rmi"/>
<parameter key="urlPath" value="/jmxrmi"/>
- <parameter key="rrd-base-name" value="cassandra21x"/>
- <parameter key="ds-name" value="cassandra21x"/>
- <parameter key="friendly-name" value="cassandra21x"/>
- <parameter key="collection" value="cassandra21x"/>
+ <parameter key="collection" value="jmx-cassandra30x"/>
+ <parameter key="friendly-name" value="cassandra"/>
<parameter key="thresholding-enabled" value="true"/>
<parameter key="factory" value="PASSWORD-CLEAR"/>
<parameter key="username" value="cassandra-username"/>
<parameter key="password" value="cassandra-password"/>
</service>
- </package>
-
- <package name="cassandra21x-newts">
- <filter><![CDATA[(IPADDR != '0.0.0.0') & (catincCassandra21x & catincNewts)]]></filter>
<service name="JMX-Cassandra-Newts" interval="300000" user-defined="false" status="on">
<parameter key="port" value="7199"/>
<parameter key="retry" value="2"/>
<parameter key="timeout" value="3000"/>
<parameter key="protocol" value="rmi"/>
<parameter key="urlPath" value="/jmxrmi"/>
- <parameter key="rrd-base-name" value="cassandra21x-newts"/>
- <parameter key="ds-name" value="cassandra21x-newts"/>
- <parameter key="friendly-name" value="cassandra21x-newts"/>
- <parameter key="collection" value="cassandra21x-newts"/>
+ <parameter key="collection" value="jmx-cassandra30x-newts"/>
+ <parameter key="friendly-name" value="cassandra-newts"/>
<parameter key="thresholding-enabled" value="true"/>
<parameter key="factory" value="PASSWORD-CLEAR"/>
<parameter key="username" value="cassandra-username"/>
@@ -91,7 +83,22 @@
<parameter key="thresholding-enabled" value="true"/>
</service>
</package>
+ <package name="vmware6">
+ <filter><![CDATA[(IPADDR != '0.0.0.0') & (categoryName == 'VMware6')]]></filter>
+ <service name="VMware-VirtualMachine" interval="300000" user-defined="false" status="on">
+ <parameter key="collection" value="default-VirtualMachine6"/>
+ <parameter key="thresholding-enabled" value="true"/>
+ </service>
+ <service name="VMware-HostSystem" interval="300000" user-defined="false" status="on">
+ <parameter key="collection" value="default-HostSystem6"/>
+ <parameter key="thresholding-enabled" value="true"/>
+ </service>
+ <service name="VMwareCim-HostSystem" interval="300000" user-defined="false" status="on">
+ <parameter key="collection" value="default-ESX-HostSystem"/>
+ <parameter key="thresholding-enabled" value="true"/>
+ </service>
+ </package>
<package name="example1">
<filter>IPADDR != '0.0.0.0'</filter>
<include-range begin="1.1.1.1" end="254.254.254.254"/>
@@ -123,6 +130,32 @@
<parameter key="friendly-name" value="opennms-jvm"/>
</service>
+ <service name="JMX-Minion" interval="300000" user-defined="false" status="on">
+ <parameter key="port" value="1299"/>
+ <parameter key="retry" value="2"/>
+ <parameter key="timeout" value="3000"/>
+ <parameter key="urlPath" value="/karaf-minion"/>
+ <parameter key="factory" value="PASSWORD-CLEAR"/>
+ <parameter key="username" value="admin"/>
+ <parameter key="password" value="admin"/>
+ <parameter key="rrd-base-name" value="java"/>
+ <parameter key="collection" value="jmx-minion"/>
+ <parameter key="thresholding-enabled" value="true"/>
+ <parameter key="ds-name" value="jmx-minion"/>
+ <parameter key="friendly-name" value="jmx-minion"/>
+ </service>
+
+ <service name="JMX-Kafka" interval="300000" user-defined="false" status="on">
+ <parameter key="port" value="9999"/>
+ <parameter key="retry" value="2"/>
+ <parameter key="timeout" value="3000"/>
+ <parameter key="rrd-base-name" value="java"/>
+ <parameter key="collection" value="jmx-kafka"/>
+ <parameter key="thresholding-enabled" value="true"/>
+ <parameter key="ds-name" value="jmx-kafka"/>
+ <parameter key="friendly-name" value="jmx-kafka"/>
+ </service>
+
<service name="PostgreSQL" interval="300000" user-defined="false" status="on">
<parameter key="collection" value="PostgreSQL"/>
<parameter key="thresholding-enabled" value="true"/>
@@ -131,18 +164,26 @@
<parameter key="password" value="postgres"/>
<parameter key="url" value="jdbc:postgresql://OPENNMS_JDBC_HOSTNAME:5432/opennms"/>
</service>
+
+ <service name="Elasticsearch" interval="300000" user-defined="false" status="on">
+ <parameter key="collection" value="xml-elasticsearch-cluster-stats" />
+ <parameter key="handler-class" value="org.opennms.protocols.json.collector.DefaultJsonCollectionHandler"/>
+ </service>
</package>
- <collector service="JMX-Cassandra" class-name="org.opennms.netmgt.collectd.Jsr160Collector"/>
- <collector service="JMX-Cassandra-Newts" class-name="org.opennms.netmgt.collectd.Jsr160Collector"/>
+ <collector service="Elasticsearch" class-name="org.opennms.protocols.xml.collector.XmlCollector"/>
+ <collector service="PostgreSQL" class-name="org.opennms.netmgt.collectd.JdbcCollector"/>
<collector service="SNMP" class-name="org.opennms.netmgt.collectd.SnmpCollector"/>
<collector service="WMI" class-name="org.opennms.netmgt.collectd.WmiCollector"/>
<collector service="WS-Man" class-name="org.opennms.netmgt.collectd.WsManCollector"/>
- <collector service="OpenNMS-JVM" class-name="org.opennms.netmgt.collectd.Jsr160Collector"/>
<collector service="VMware-VirtualMachine" class-name="org.opennms.netmgt.collectd.VmwareCollector"/>
<collector service="VMware-HostSystem" class-name="org.opennms.netmgt.collectd.VmwareCollector"/>
<collector service="VMwareCim-HostSystem" class-name="org.opennms.netmgt.collectd.VmwareCimCollector"/>
- <collector service="PostgreSQL" class-name="org.opennms.netmgt.collectd.JdbcCollector"/>
-</collectd-configuration>
+ <collector service="OpenNMS-JVM" class-name="org.opennms.netmgt.collectd.Jsr160Collector"/>
+ <collector service="JMX-Minion" class-name="org.opennms.netmgt.collectd.Jsr160Collector"/>
+ <collector service="JMX-Cassandra" class-name="org.opennms.netmgt.collectd.Jsr160Collector"/>
+ <collector service="JMX-Cassandra-Newts" class-name="org.opennms.netmgt.collectd.Jsr160Collector"/>
+ <collector service="JMX-Kafka" class-name="org.opennms.netmgt.collectd.Jsr160Collector"/>
+</collectd-configuration>
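
The collectd changes above fold the two Cassandra packages into a single cassandra-via-jmx package pointing at the jmx-cassandra30x collections, add JMX-Minion, JMX-Kafka, Elasticsearch and VMware6 entries, and reorder the collector list. Since every collected <service> needs a matching <collector> element at the bottom of the file, a quick consistency check can be scripted; the sketch below is illustrative only, and the path is an assumption to adjust for your installation.

    import xml.etree.ElementTree as ET

    # Assumed install path; point this at your own etc/ directory.
    tree = ET.parse("/opt/opennms/etc/collectd-configuration.xml")
    root = tree.getroot()

    # The {*} wildcard tolerates a default XML namespace (Python 3.8+).
    services = {svc.get("name")
                for pkg in root.findall("{*}package")
                for svc in pkg.findall("{*}service")}
    collectors = {c.get("service") for c in root.findall("{*}collector")}

    print("services with no collector:", sorted(services - collectors))
    print("collectors with no service:", sorted(collectors - services))
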
diff --git a/config.properties b/config.properties
index 19f97c7..22f7b9b 100644
--- a/config.properties
+++ b/config.properties
@@ -71,11 +71,10 @@ org.osgi.framework.system.packages= \
org.osgi.service.packageadmin;uses:="org.osgi.framework";version="1.2",\
org.osgi.service.url;version="1.0", \
org.osgi.util.tracker;uses:="org.osgi.framework";version="1.5.1", \
- org.apache.karaf.jaas.boot;version="2.4.0", \
- org.apache.karaf.jaas.boot.principal;version="2.4.0", \
- org.apache.karaf.management.boot;version="2.4.0", \
- org.apache.karaf.version;version="2.4.0", \
- org.apache.karaf.diagnostic.core;version="2.4.0", \
+ org.apache.karaf.jaas.boot;version="2.4.3", \
+ org.apache.karaf.jaas.boot.principal;version="2.4.3", \
+ org.apache.karaf.version;version="2.4.3", \
+ org.apache.karaf.diagnostic.core;version="2.4.3", \
${jre-${java.specification.version}}
#
@@ -146,19 +145,19 @@ eecap-1.2= osgi.ee; osgi.ee="OSGi/Minimum"; version:List<Version>="1.0,1.1", \
#
# javax.transaction is needed to avoid class loader constraint violation when using javax.sql
#
-org.osgi.framework.bootdelegation=org.apache.karaf.jaas.boot,org.apache.karaf.management.boot,sun.*,com.sun.*,javax.transaction,javax.transaction.*,javax.xml.crypto,javax.xml.crypto.*,org.apache.xerces.jaxp.datatype,org.apache.xerces.stax,org.apache.xerces.parsers,org.apache.xerces.jaxp,org.apache.xerces.jaxp.validation,org.apache.xerces.dom
+org.osgi.framework.bootdelegation=org.apache.karaf.jaas.boot,sun.*,com.sun.*,javax.transaction,javax.transaction.*,javax.xml.crypto,javax.xml.crypto.*,org.apache.xerces.jaxp.datatype,org.apache.xerces.stax,org.apache.xerces.parsers,org.apache.xerces.jaxp,org.apache.xerces.jaxp.validation,org.apache.xerces.dom
# jVisualVM support
# in order to use Karaf with jvisualvm, the org.osgi.framework.bootdelegation property has to contain the org.netbeans.lib.profiler.server package
# and, so, it should look like:
#
-# org.osgi.framework.bootdelegation=org.apache.karaf.jaas.boot,org.apache.karaf.jaas.boot.principal,org.apache.karaf.management.boot,sun.*,com.sun.*,javax.transaction,javax.transaction.*,javax.xml.crypto,javax.xml.crypto.*,org.apache.xerces.jaxp.datatype,org.apache.xerces.stax,org.apache.xerces.parsers,org.apache.xerces.jaxp,org.apache.xerces.jaxp.validation,org.apache.xerces.dom,org.netbeans.lib.profiler.server
+# org.osgi.framework.bootdelegation=org.apache.karaf.jaas.boot,org.apache.karaf.jaas.boot.principal,sun.*,com.sun.*,javax.transaction,javax.transaction.*,javax.xml.crypto,javax.xml.crypto.*,org.apache.xerces.jaxp.datatype,org.apache.xerces.stax,org.apache.xerces.parsers,org.apache.xerces.jaxp,org.apache.xerces.jaxp.validation,org.apache.xerces.dom,org.netbeans.lib.profiler.server
#
# YourKit support
# in order to use Karaf with YourKit, the org.osgi.framework.bootdelegation property has to contain the com.yourkit.* packages
# and, so, it should look like:
#
-# org.osgi.framework.bootdelegation=org.apache.karaf.jaas.boot,org.apache.karaf.jaas.boot.principal,org.apache.karaf.management.boot,sun.*,com.sun.*,javax.transaction,javax.transaction.*,javax.xml.crypto,javax.xml.crypto.*,org.apache.xerces.jaxp.datatype,org.apache.xerces.stax,org.apache.xerces.parsers,org.apache.xerces.jaxp,org.apache.xerces.jaxp.validation,org.apache.xerces.dom,com.yourkit.*
+# org.osgi.framework.bootdelegation=org.apache.karaf.jaas.boot,org.apache.karaf.jaas.boot.principal,sun.*,com.sun.*,javax.transaction,javax.transaction.*,javax.xml.crypto,javax.xml.crypto.*,org.apache.xerces.jaxp.datatype,org.apache.xerces.stax,org.apache.xerces.parsers,org.apache.xerces.jaxp,org.apache.xerces.jaxp.validation,org.apache.xerces.dom,com.yourkit.*
#
#
@@ -217,4 +216,4 @@ karaf.delay.console=false
#
# Set the Blueprint container in synchronous mode to avoid dependencies startup issue
#
-org.apache.aries.blueprint.synchronous=true
\ No newline at end of file
+org.apache.aries.blueprint.synchronous=true
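
The config.properties hunks bump the exported Karaf boot packages from 2.4.0 to 2.4.3 and drop org.apache.karaf.management.boot from both the system packages and the bootdelegation list. Inspecting the resulting values requires folding the backslash line continuations first; the loader below is a rough sketch of that convention (the path is assumed, and this is not the parser Karaf itself uses).

    def read_properties(path):
        """Fold trailing-backslash continuations, then split key=value pairs."""
        props, pending = {}, ""
        with open(path) as f:
            for raw in f:
                line = pending + raw.rstrip("\n")
                if line.endswith("\\"):
                    pending = line[:-1]
                    continue
                pending = ""
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                key, _, value = line.partition("=")
                props[key.strip()] = value.strip()
        return props

    props = read_properties("/opt/opennms/etc/config.properties")
    for pkg in props["org.osgi.framework.bootdelegation"].split(","):
        print(pkg)
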
diff --git a/create.sql b/create.sql
index 9cf2f5f..4ee24a8 100644
--- a/create.sql
+++ b/create.sql
@@ -246,7 +246,7 @@ CREATE TABLE monitoringlocationspollingpackages (
monitoringlocationid TEXT NOT NULL,
packagename TEXT NOT NULL,
- CONSTRAINT monitoringlocationspollingpackages_fkey FOREIGN KEY (monitoringlocationid) REFERENCES monitoringlocations (id) ON DELETE CASCADE
+ CONSTRAINT monitoringlocationspollingpackages_fkey FOREIGN KEY (monitoringlocationid) REFERENCES monitoringlocations (id) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE INDEX monitoringlocationspollingpackages_id_idx on monitoringlocationspollingpackages(monitoringlocationid);
@@ -257,7 +257,7 @@ CREATE TABLE monitoringlocationscollectionpackages (
monitoringlocationid TEXT NOT NULL,
packagename TEXT NOT NULL,
- CONSTRAINT monitoringlocationscollectionpackages_fkey FOREIGN KEY (monitoringlocationid) REFERENCES monitoringlocations (id) ON DELETE CASCADE
+ CONSTRAINT monitoringlocationscollectionpackages_fkey FOREIGN KEY (monitoringlocationid) REFERENCES monitoringlocations (id) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE INDEX monitoringlocationscollectionpackages_id_idx on monitoringlocationscollectionpackages(monitoringlocationid);
@@ -268,12 +268,18 @@ CREATE TABLE monitoringlocationstags (
monitoringlocationid TEXT NOT NULL,
tag TEXT NOT NULL,
- CONSTRAINT monitoringlocationstags_fkey FOREIGN KEY (monitoringlocationid) REFERENCES monitoringlocations (id) ON DELETE CASCADE
+ CONSTRAINT monitoringlocationstags_fkey FOREIGN KEY (monitoringlocationid) REFERENCES monitoringlocations (id) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE INDEX monitoringlocationstags_id_idx on monitoringlocationstags(monitoringlocationid);
CREATE UNIQUE INDEX monitoringlocationstags_id_pkg_idx on monitoringlocationstags(monitoringlocationid, tag);
+--##################################################################
+--# The following command adds the initial 'Default' entry to
+--# the 'monitoringlocations' table.
+--##################################################################
+INSERT INTO monitoringlocations (id, monitoringarea) values ('Default', 'Default');
+
--#####################################################
--# monitoringsystems Table - Contains a list of OpenNMS systems
@@ -317,7 +323,7 @@ CREATE UNIQUE INDEX monitoringsystemsproperties_id_property_idx on monitoringsys
--# The following command adds the initial localhost poller entry to
--# the 'monitoringsystems' table.
--##################################################################
-INSERT INTO monitoringsystems (id, label, location, type) values ('00000000-0000-0000-0000-000000000000', 'localhost', 'localhost', 'OpenNMS');
+INSERT INTO monitoringsystems (id, label, location, type) values ('00000000-0000-0000-0000-000000000000', 'localhost', 'Default', 'OpenNMS');
--#####################################################
@@ -339,7 +345,7 @@ CREATE TABLE scanreports (
timestamp TIMESTAMP WITH TIME ZONE,
CONSTRAINT scanreports_pkey PRIMARY KEY (id),
- CONSTRAINT scanreports_monitoringlocations_fkey FOREIGN KEY (location) REFERENCES monitoringlocations (id) ON DELETE CASCADE
+ CONSTRAINT scanreports_monitoringlocations_fkey FOREIGN KEY (location) REFERENCES monitoringlocations (id) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE UNIQUE INDEX scanreports_id_idx on scanreport(id);
@@ -484,8 +490,10 @@ create table node (
lastCapsdPoll timestamp with time zone,
foreignSource varchar(64),
foreignId varchar(64),
+ location text not null,
- constraint pk_nodeID primary key (nodeID)
+ constraint pk_nodeID primary key (nodeID),
+ constraint fk_node_location foreign key (location) references monitoringlocations (id) ON DELETE CASCADE ON UPDATE CASCADE
);
create index node_id_type_idx on node(nodeID, nodeType);
@@ -2350,6 +2358,7 @@ CREATE TABLE bsm_reduce (
type character varying(32) NOT NULL,
threshold float,
threshold_severity integer,
+ base float,
CONSTRAINT bsm_reduce_pkey PRIMARY KEY (id)
);
@@ -2419,3 +2428,38 @@ CREATE TABLE bsm_service_children (
CONSTRAINT fk_bsm_service_child_service_id FOREIGN KEY (bsm_service_child_id)
REFERENCES bsm_service (id) ON DELETE CASCADE
);
+
+--##################################################################
+--# Topology tables
+--##################################################################
+
+-- Layout table
+CREATE TABLE topo_layout (
+ id varchar(255) NOT NULL,
+ created timestamp NOT NULL,
+ creator varchar(255) NOT NULL,
+ updated timestamp NOT NULL,
+ updator varchar(255) NOT NULL,
+ last_used timestamp,
+ CONSTRAINT topo_layout_pkey PRIMARY KEY (id)
+);
+
+-- Layout coordinates of vertex
+CREATE TABLE topo_vertex_position (
+ id integer NOT NULL,
+ x integer NOT NULL,
+ y integer NOT NULL,
+ vertex_namespace varchar(255) NULL,
+ vertex_id varchar(255) NULL,
+ CONSTRAINT topo_vertex_position_pkey PRIMARY KEY (id)
+);
+
+-- Relation table (layout -> vertex positions)
+CREATE TABLE topo_layout_vertex_positions (
+ vertex_position_id integer NOT NULL,
+ layout_id varchar(255) NOT NULL,
+ CONSTRAINT fk_topo_layout_vertex_positions_layout_id FOREIGN KEY (layout_id)
+ REFERENCES topo_layout (id) ON DELETE CASCADE,
+ CONSTRAINT fk_topo_layout_vertex_positions_vertex_position_id FOREIGN KEY (vertex_position_id)
+ REFERENCES topo_vertex_position (id) ON DELETE CASCADE
+);
\ No newline at end of file
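
The create.sql hunks add ON UPDATE CASCADE to the foreign keys that reference monitoringlocations, seed an initial 'Default' location, point the localhost entry in monitoringsystems at it, and give node a mandatory location column. The practical effect of the cascade is that renaming a location id now propagates to referencing rows instead of being rejected. A toy demonstration follows, using sqlite3 in place of PostgreSQL with the tables trimmed to the relevant columns.

    import sqlite3

    db = sqlite3.connect(":memory:")
    db.execute("PRAGMA foreign_keys = ON")  # SQLite enforces FKs only when asked
    db.execute("CREATE TABLE monitoringlocations (id TEXT PRIMARY KEY, monitoringarea TEXT)")
    db.execute("""CREATE TABLE node (
                    nodeid INTEGER PRIMARY KEY,
                    location TEXT NOT NULL
                      REFERENCES monitoringlocations (id)
                      ON DELETE CASCADE ON UPDATE CASCADE)""")
    db.execute("INSERT INTO monitoringlocations VALUES ('Default', 'Default')")
    db.execute("INSERT INTO node VALUES (1, 'Default')")

    # Rename the location; the cascade rewrites node.location in step.
    db.execute("UPDATE monitoringlocations SET id = 'HQ' WHERE id = 'Default'")
    print(db.execute("SELECT location FROM node").fetchone())  # ('HQ',)
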
diff --git a/custom.properties b/custom.properties
index 1a5cfb4..11b5df6 100644
--- a/custom.properties
+++ b/custom.properties
@@ -32,7 +32,7 @@ org.osgi.framework.system.packages.extra=org.apache.karaf.branding,\
sun.misc,\
sun.net.spi.nameservice,\
javax.jms;version=1.1.0,\
- javax.servlet;javax.servlet.annotation;javax.servlet.descriptor;javax.servlet.http;javax.servlet.resources;version=2.6,\
+ javax.servlet;javax.servlet.annotation;javax.servlet.descriptor;javax.servlet.http;javax.servlet.resources;version=3.1.0,\
javax.persistence;version=2.0,\
javax.validation.constraints,\
javax.ws.rs;javax.ws.rs.client;javax.ws.rs.container;javax.ws.rs.core;javax.ws.rs.ext;version=2.1,\
@@ -45,27 +45,21 @@ org.osgi.framework.system.packages.extra=org.apache.karaf.branding,\
javax.wsdl.factory;version=1.6.3,\
javax.wsdl.xml;version=1.6.3,\
javax.wsdl;version=1.6.3,\
- javax.xml.bind;version=2.1.7,\
- javax.xml.bind.annotation;version=2.1.7,\
- javax.xml.bind.annotation.adapters;version=2.1.7,\
- javax.xml.bind.attachment;version=2.1.7,\
- javax.xml.bind.helpers;version=2.1.7,\
- javax.xml.bind.util;version=2.1.7,\
antlr;version=2.7.7,\
antlr.collections.impl;version=2.7.7,\
com.codahale.metrics;version=3.1.2,\
- com.google.common.annotations;version=17.0,\
- com.google.common.base;version=17.0,\
- com.google.common.cache;version=17.0,\
- com.google.common.collect;version=17.0,\
- com.google.common.eventbus;version=17.0,\
- com.google.common.hash;version=17.0,\
- com.google.common.io;version=17.0,\
- com.google.common.math;version=17.0,\
- com.google.common.net;version=17.0,\
- com.google.common.primitives;version=17.0,\
- com.google.common.reflect;version=17.0,\
- com.google.common.util.concurrent;version=17.0,\
+ com.google.common.annotations;version=18.0,\
+ com.google.common.base;version=18.0,\
+ com.google.common.cache;version=18.0,\
+ com.google.common.collect;version=18.0,\
+ com.google.common.eventbus;version=18.0,\
+ com.google.common.hash;version=18.0,\
+ com.google.common.io;version=18.0,\
+ com.google.common.math;version=18.0,\
+ com.google.common.net;version=18.0,\
+ com.google.common.primitives;version=18.0,\
+ com.google.common.reflect;version=18.0,\
+ com.google.common.util.concurrent;version=18.0,\
com.ibm.wsdl.extensions.http;version=1.6.3,\
com.ibm.wsdl.extensions.mime;version=1.6.3,\
com.ibm.wsdl.extensions.schema;version=1.6.3,\
@@ -83,12 +77,12 @@ org.osgi.framework.system.packages.extra=org.apache.karaf.branding,\
freemarker.template;version=2.3.21,\
freemarker.template.utility;version=2.3.21,\
org.apache.commons.beanutils;version=1.8.3,\
- org.apache.commons.codec;version=1.9,\
- org.apache.commons.codec.binary;version=1.9,\
- org.apache.commons.codec.digest;version=1.9,\
- org.apache.commons.codec.language;version=1.9,\
- org.apache.commons.codec.language.bm;version=1.9,\
- org.apache.commons.codec.net;version=1.9,\
+ org.apache.commons.codec;version=1.10,\
+ org.apache.commons.codec.binary;version=1.10,\
+ org.apache.commons.codec.digest;version=1.10,\
+ org.apache.commons.codec.language;version=1.10,\
+ org.apache.commons.codec.language.bm;version=1.10,\
+ org.apache.commons.codec.net;version=1.10,\
org.apache.commons.collections;version=3.2.2,\
org.apache.commons.collections.comparators;version=3.2.2,\
org.apache.commons.collections.keyvalue;version=3.2.2,\
@@ -116,97 +110,97 @@ org.osgi.framework.system.packages.extra=org.apache.karaf.branding,\
org.apache.commons.lang.reflect;version=2.6,\
org.apache.commons.lang.text;version=2.6,\
org.apache.commons.lang.time;version=2.6,\
- org.apache.cxf.annotations;version=3.1.5,\
- org.apache.cxf.attachment;version=3.1.5,\
- org.apache.cxf.binding;version=3.1.5,\
- org.apache.cxf.bus.blueprint;version=3.1.5,\
- org.apache.cxf.bus.extension;version=3.1.5,\
- org.apache.cxf.bus.managers;version=3.1.5,\
- org.apache.cxf.bus.osgi;version=3.1.5,\
- org.apache.cxf.bus.resource;version=3.1.5,\
- org.apache.cxf.bus.spring;version=3.1.5,\
- org.apache.cxf.bus;version=3.1.5,\
- org.apache.cxf.buslifecycle;version=3.1.5,\
- org.apache.cxf.catalog;version=3.1.5,\
- org.apache.cxf.common.annotation;version=3.1.5,\
- org.apache.cxf.common.classloader;version=3.1.5,\
- org.apache.cxf.common.i18n;version=3.1.5,\
- org.apache.cxf.common.injection;version=3.1.5,\
- org.apache.cxf.common.jaxb;version=3.1.5,\
- org.apache.cxf.common.logging;version=3.1.5,\
- org.apache.cxf.common.security;version=3.1.5,\
- org.apache.cxf.common.util;version=3.1.5,\
- org.apache.cxf.common.xmlschema;version=3.1.5,\
- org.apache.cxf.common;version=3.1.5,\
- org.apache.cxf.configuration.blueprint;version=3.1.5,\
- org.apache.cxf.configuration.jsse;version=3.1.5,\
- org.apache.cxf.configuration.security;version=3.1.5,\
- org.apache.cxf.configuration.spring;version=3.1.5,\
- org.apache.cxf.configuration;version=3.1.5,\
- org.apache.cxf.continuations;version=3.1.5,\
- org.apache.cxf.databinding.source.mime;version=3.1.5,\
- org.apache.cxf.databinding.source;version=3.1.5,\
- org.apache.cxf.databinding.stax;version=3.1.5,\
- org.apache.cxf.databinding;version=3.1.5,\
- org.apache.cxf.endpoint;version=3.1.5,\
- org.apache.cxf.extension;version=3.1.5,\
- org.apache.cxf.feature.transform;version=3.1.5,\
- org.apache.cxf.feature.validation;version=3.1.5,\
- org.apache.cxf.feature;version=3.1.5,\
- org.apache.cxf.headers;version=3.1.5,\
- org.apache.cxf.helpers;version=3.1.5,\
- org.apache.cxf.interceptor.security.callback;version=3.1.5,\
- org.apache.cxf.interceptor.security;version=3.1.5,\
- org.apache.cxf.interceptor.transform;version=3.1.5,\
- org.apache.cxf.interceptor;version=3.1.5,\
- org.apache.cxf.io;version=3.1.5,\
- org.apache.cxf.logging;version=3.1.5,\
- org.apache.cxf.management.annotation;version=3.1.5,\
- org.apache.cxf.management;version=3.1.5,\
- org.apache.cxf.message;version=3.1.5,\
- org.apache.cxf.phase;version=3.1.5,\
- org.apache.cxf.policy;version=3.1.5,\
- org.apache.cxf.resource;version=3.1.5,\
- org.apache.cxf.security.claims.authorization;version=3.1.5,\
- org.apache.cxf.security.transport;version=3.1.5,\
- org.apache.cxf.security;version=3.1.5,\
- org.apache.cxf.service.factory;version=3.1.5,\
- org.apache.cxf.service.invoker.spring;version=3.1.5,\
- org.apache.cxf.service.invoker;version=3.1.5,\
- org.apache.cxf.service.model;version=3.1.5,\
- org.apache.cxf.service;version=3.1.5,\
- org.apache.cxf.staxutils.transform;version=3.1.5,\
- org.apache.cxf.staxutils.validation;version=3.1.5,\
- org.apache.cxf.staxutils;version=3.1.5,\
- org.apache.cxf.transport.common.gzip;version=3.1.5,\
- org.apache.cxf.transport.http.auth;version=3.1.5,\
- org.apache.cxf.transport.http.blueprint;version=3.1.5,\
- org.apache.cxf.transport.http.osgi;version=3.1.5,\
- org.apache.cxf.transport.http.policy.impl;version=3.1.5,\
- org.apache.cxf.transport.http.policy;version=3.1.5,\
- org.apache.cxf.transport.http.spring;version=3.1.5,\
- org.apache.cxf.transport.http;version=3.1.5,\
- org.apache.cxf.transport.https.httpclient;version=3.1.5,\
- org.apache.cxf.transport.https;version=3.1.5,\
- org.apache.cxf.transport.servlet.blueprint;version=3.1.5,\
- org.apache.cxf.transport.servlet.servicelist;version=3.1.5,\
- org.apache.cxf.transport.servlet;version=3.1.5,\
- org.apache.cxf.transport;version=3.1.5,\
- org.apache.cxf.transports.http.configuration;version=3.1.5,\
- org.apache.cxf.validation;version=3.1.5,\
- org.apache.cxf.version;version=3.1.5,\
- org.apache.cxf.workqueue;version=3.1.5,\
- org.apache.cxf.ws.addressing.v200403;version=3.1.5,\
- org.apache.cxf.ws.addressing.v200408;version=3.1.5,\
- org.apache.cxf.ws.addressing.wsdl;version=3.1.5,\
- org.apache.cxf.ws.addressing;version=3.1.5,\
- org.apache.cxf.wsdl.binding;version=3.1.5,\
- org.apache.cxf.wsdl.http;version=3.1.5,\
- org.apache.cxf.wsdl.interceptors;version=3.1.5,\
- org.apache.cxf.wsdl.service.factory;version=3.1.5,\
- org.apache.cxf.wsdl11;version=3.1.5,\
- org.apache.cxf.wsdl;version=3.1.5,\
- org.apache.cxf;version=3.1.5,\
+ org.apache.cxf.annotations;version=3.1.7,\
+ org.apache.cxf.attachment;version=3.1.7,\
+ org.apache.cxf.binding;version=3.1.7,\
+ org.apache.cxf.bus.blueprint;version=3.1.7,\
+ org.apache.cxf.bus.extension;version=3.1.7,\
+ org.apache.cxf.bus.managers;version=3.1.7,\
+ org.apache.cxf.bus.osgi;version=3.1.7,\
+ org.apache.cxf.bus.resource;version=3.1.7,\
+ org.apache.cxf.bus.spring;version=3.1.7,\
+ org.apache.cxf.bus;version=3.1.7,\
+ org.apache.cxf.buslifecycle;version=3.1.7,\
+ org.apache.cxf.catalog;version=3.1.7,\
+ org.apache.cxf.common.annotation;version=3.1.7,\
+ org.apache.cxf.common.classloader;version=3.1.7,\
+ org.apache.cxf.common.i18n;version=3.1.7,\
+ org.apache.cxf.common.injection;version=3.1.7,\
+ org.apache.cxf.common.jaxb;version=3.1.7,\
+ org.apache.cxf.common.logging;version=3.1.7,\
+ org.apache.cxf.common.security;version=3.1.7,\
+ org.apache.cxf.common.util;version=3.1.7,\
+ org.apache.cxf.common.xmlschema;version=3.1.7,\
+ org.apache.cxf.common;version=3.1.7,\
+ org.apache.cxf.configuration.blueprint;version=3.1.7,\
+ org.apache.cxf.configuration.jsse;version=3.1.7,\
+ org.apache.cxf.configuration.security;version=3.1.7,\
+ org.apache.cxf.configuration.spring;version=3.1.7,\
+ org.apache.cxf.configuration;version=3.1.7,\
+ org.apache.cxf.continuations;version=3.1.7,\
+ org.apache.cxf.databinding.source.mime;version=3.1.7,\
+ org.apache.cxf.databinding.source;version=3.1.7,\
+ org.apache.cxf.databinding.stax;version=3.1.7,\
+ org.apache.cxf.databinding;version=3.1.7,\
+ org.apache.cxf.endpoint;version=3.1.7,\
+ org.apache.cxf.extension;version=3.1.7,\
+ org.apache.cxf.feature.transform;version=3.1.7,\
+ org.apache.cxf.feature.validation;version=3.1.7,\
+ org.apache.cxf.feature;version=3.1.7,\
+ org.apache.cxf.headers;version=3.1.7,\
+ org.apache.cxf.helpers;version=3.1.7,\
+ org.apache.cxf.interceptor.security.callback;version=3.1.7,\
+ org.apache.cxf.interceptor.security;version=3.1.7,\
+ org.apache.cxf.interceptor.transform;version=3.1.7,\
+ org.apache.cxf.interceptor;version=3.1.7,\
+ org.apache.cxf.io;version=3.1.7,\
+ org.apache.cxf.logging;version=3.1.7,\
+ org.apache.cxf.management.annotation;version=3.1.7,\
+ org.apache.cxf.management;version=3.1.7,\
+ org.apache.cxf.message;version=3.1.7,\
+ org.apache.cxf.phase;version=3.1.7,\
+ org.apache.cxf.policy;version=3.1.7,\
+ org.apache.cxf.resource;version=3.1.7,\
+ org.apache.cxf.security.claims.authorization;version=3.1.7,\
+ org.apache.cxf.security.transport;version=3.1.7,\
+ org.apache.cxf.security;version=3.1.7,\
+ org.apache.cxf.service.factory;version=3.1.7,\
+ org.apache.cxf.service.invoker.spring;version=3.1.7,\
+ org.apache.cxf.service.invoker;version=3.1.7,\
+ org.apache.cxf.service.model;version=3.1.7,\
+ org.apache.cxf.service;version=3.1.7,\
+ org.apache.cxf.staxutils.transform;version=3.1.7,\
+ org.apache.cxf.staxutils.validation;version=3.1.7,\
+ org.apache.cxf.staxutils;version=3.1.7,\
+ org.apache.cxf.transport.common.gzip;version=3.1.7,\
+ org.apache.cxf.transport.http.auth;version=3.1.7,\
+ org.apache.cxf.transport.http.blueprint;version=3.1.7,\
+ org.apache.cxf.transport.http.osgi;version=3.1.7,\
+ org.apache.cxf.transport.http.policy.impl;version=3.1.7,\
+ org.apache.cxf.transport.http.policy;version=3.1.7,\
+ org.apache.cxf.transport.http.spring;version=3.1.7,\
+ org.apache.cxf.transport.http;version=3.1.7,\
+ org.apache.cxf.transport.https.httpclient;version=3.1.7,\
+ org.apache.cxf.transport.https;version=3.1.7,\
+ org.apache.cxf.transport.servlet.blueprint;version=3.1.7,\
+ org.apache.cxf.transport.servlet.servicelist;version=3.1.7,\
+ org.apache.cxf.transport.servlet;version=3.1.7,\
+ org.apache.cxf.transport;version=3.1.7,\
+ org.apache.cxf.transports.http.configuration;version=3.1.7,\
+ org.apache.cxf.validation;version=3.1.7,\
+ org.apache.cxf.version;version=3.1.7,\
+ org.apache.cxf.workqueue;version=3.1.7,\
+ org.apache.cxf.ws.addressing.v200403;version=3.1.7,\
+ org.apache.cxf.ws.addressing.v200408;version=3.1.7,\
+ org.apache.cxf.ws.addressing.wsdl;version=3.1.7,\
+ org.apache.cxf.ws.addressing;version=3.1.7,\
+ org.apache.cxf.wsdl.binding;version=3.1.7,\
+ org.apache.cxf.wsdl.http;version=3.1.7,\
+ org.apache.cxf.wsdl.interceptors;version=3.1.7,\
+ org.apache.cxf.wsdl.service.factory;version=3.1.7,\
+ org.apache.cxf.wsdl11;version=3.1.7,\
+ org.apache.cxf.wsdl;version=3.1.7,\
+ org.apache.cxf;version=3.1.7,\
org.apache.http;version=4.3.3,\
org.apache.http.annotation;version=4.3.3,\
org.apache.http.concurrent;version=4.3.3,\
@@ -311,285 +305,302 @@ org.osgi.framework.system.packages.extra=org.apache.karaf.branding,\
org.joda.time.format;version=2.1,\
org.owasp.encoder;version=1.2,\
org.quartz;version=1.6.5,\
- org.springframework;version=4.0.5.RELEASE_1,\
- org.springframework.aop;version=4.0.5.RELEASE_1,\
- org.springframework.aop.aspectj;version=4.0.5.RELEASE_1,\
- org.springframework.aop.aspectj.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.aop.aspectj.autoproxy;version=4.0.5.RELEASE_1,\
- org.springframework.aop.config;version=4.0.5.RELEASE_1,\
- org.springframework.aop.framework;version=4.0.5.RELEASE_1,\
- org.springframework.aop.framework.adapter;version=4.0.5.RELEASE_1,\
- org.springframework.aop.framework.autoproxy;version=4.0.5.RELEASE_1,\
- org.springframework.aop.framework.autoproxy.target;version=4.0.5.RELEASE_1,\
- org.springframework.aop.interceptor;version=4.0.5.RELEASE_1,\
- org.springframework.aop.scope;version=4.0.5.RELEASE_1,\
- org.springframework.aop.support;version=4.0.5.RELEASE_1,\
- org.springframework.aop.support.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.aop.target;version=4.0.5.RELEASE_1,\
- org.springframework.aop.target.dynamic;version=4.0.5.RELEASE_1,\
- org.springframework.asm;version=4.0.5.RELEASE_1,\
- org.springframework.asm.commons;version=4.0.5.RELEASE_1,\
- org.springframework.asm.signature;version=4.0.5.RELEASE_1,\
- org.springframework.beans;version=4.0.5.RELEASE_1,\
- org.springframework.beans.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory.access;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory.access.el;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory.aspectj;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory.config;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory.parsing;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory.serviceloader;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory.support;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory.wiring;version=4.0.5.RELEASE_1,\
- org.springframework.beans.factory.xml;version=4.0.5.RELEASE_1,\
- org.springframework.beans.propertyeditors;version=4.0.5.RELEASE_1,\
- org.springframework.beans.support;version=4.0.5.RELEASE_1,\
- org.springframework.cache;version=4.0.5.RELEASE_1,\
- org.springframework.cache.ehcache;version=4.0.5.RELEASE_1,\
- org.springframework.context;version=4.0.5.RELEASE_1,\
- org.springframework.context.access;version=4.0.5.RELEASE_1,\
- org.springframework.context.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.context.config;version=4.0.5.RELEASE_1,\
- org.springframework.context.event;version=4.0.5.RELEASE_1,\
- org.springframework.context.expression;version=4.0.5.RELEASE_1,\
- org.springframework.context.i18n;version=4.0.5.RELEASE_1,\
- org.springframework.context.support;version=4.0.5.RELEASE_1,\
- org.springframework.context.weaving;version=4.0.5.RELEASE_1,\
- org.springframework.core;version=4.0.5.RELEASE_1,\
- org.springframework.core.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.core.convert;version=4.0.5.RELEASE_1,\
- org.springframework.core.convert.converter;version=4.0.5.RELEASE_1,\
- org.springframework.core.convert.support;version=4.0.5.RELEASE_1,\
- org.springframework.core.enums;version=4.0.5.RELEASE_1,\
- org.springframework.core.io;version=4.0.5.RELEASE_1,\
- org.springframework.core.io.support;version=4.0.5.RELEASE_1,\
- org.springframework.core.serializer;version=4.0.5.RELEASE_1,\
- org.springframework.core.serializer.support;version=4.0.5.RELEASE_1,\
- org.springframework.core.style;version=4.0.5.RELEASE_1,\
- org.springframework.core.task;version=4.0.5.RELEASE_1,\
- org.springframework.core.task.support;version=4.0.5.RELEASE_1,\
- org.springframework.core.type;version=4.0.5.RELEASE_1,\
- org.springframework.core.type.classreading;version=4.0.5.RELEASE_1,\
- org.springframework.core.type.filter;version=4.0.5.RELEASE_1,\
- org.springframework.dao;version=4.0.5.RELEASE_1,\
- org.springframework.dao.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.dao.support;version=4.0.5.RELEASE_1,\
- org.springframework.ejb;version=4.0.5.RELEASE_1,\
- org.springframework.ejb.access;version=4.0.5.RELEASE_1,\
- org.springframework.ejb.config;version=4.0.5.RELEASE_1,\
- org.springframework.ejb.interceptor;version=4.0.5.RELEASE_1,\
- org.springframework.ejb.support;version=4.0.5.RELEASE_1,\
- org.springframework.expression;version=4.0.5.RELEASE_1,\
- org.springframework.expression.common;version=4.0.5.RELEASE_1,\
- org.springframework.expression.spel;version=4.0.5.RELEASE_1,\
- org.springframework.expression.spel.ast;version=4.0.5.RELEASE_1,\
- org.springframework.expression.spel.generated;version=4.0.5.RELEASE_1,\
- org.springframework.expression.spel.standard;version=4.0.5.RELEASE_1,\
- org.springframework.expression.spel.support;version=4.0.5.RELEASE_1,\
- org.springframework.format;version=4.0.5.RELEASE_1,\
- org.springframework.format.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.format.datetime;version=4.0.5.RELEASE_1,\
- org.springframework.format.datetime.joda;version=4.0.5.RELEASE_1,\
- org.springframework.format.number;version=4.0.5.RELEASE_1,\
- org.springframework.format.support;version=4.0.5.RELEASE_1,\
- org.springframework.http;version=4.0.5.RELEASE_1,\
- org.springframework.http.client;version=4.0.5.RELEASE_1,\
- org.springframework.http.client.support;version=4.0.5.RELEASE_1,\
- org.springframework.http.converter;version=4.0.5.RELEASE_1,\
- org.springframework.http.converter.feed;version=4.0.5.RELEASE_1,\
- org.springframework.http.converter.json;version=4.0.5.RELEASE_1,\
- org.springframework.http.converter.xml;version=4.0.5.RELEASE_1,\
- org.springframework.http.server;version=4.0.5.RELEASE_1,\
- org.springframework.instrument;version=4.0.5.RELEASE_1,\
- org.springframework.instrument.classloading;version=4.0.5.RELEASE_1,\
- org.springframework.instrument.classloading.glassfish;version=4.0.5.RELEASE_1,\
- org.springframework.instrument.classloading.jboss;version=4.0.5.RELEASE_1,\
- org.springframework.instrument.classloading.oc4j;version=4.0.5.RELEASE_1,\
- org.springframework.instrument.classloading.weblogic;version=4.0.5.RELEASE_1,\
- org.springframework.jca;version=4.0.5.RELEASE_1,\
- org.springframework.jca.cci;version=4.0.5.RELEASE_1,\
- org.springframework.jca.cci.connection;version=4.0.5.RELEASE_1,\
- org.springframework.jca.cci.core;version=4.0.5.RELEASE_1,\
- org.springframework.jca.cci.core.support;version=4.0.5.RELEASE_1,\
- org.springframework.jca.cci.object;version=4.0.5.RELEASE_1,\
- org.springframework.jca.context;version=4.0.5.RELEASE_1,\
- org.springframework.jca.endpoint;version=4.0.5.RELEASE_1,\
- org.springframework.jca.support;version=4.0.5.RELEASE_1,\
- org.springframework.jca.work;version=4.0.5.RELEASE_1,\
- org.springframework.jca.work.glassfish;version=4.0.5.RELEASE_1,\
- org.springframework.jca.work.jboss;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.config;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.core;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.core.metadata;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.core.namedparam;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.core.simple;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.core.support;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.datasource;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.datasource.embedded;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.datasource.init;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.datasource.lookup;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.object;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.support;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.support.incrementer;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.support.lob;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.support.nativejdbc;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.support.rowset;version=4.0.5.RELEASE_1,\
- org.springframework.jdbc.support.xml;version=4.0.5.RELEASE_1,\
- org.springframework.jms;version=4.0.5.RELEASE_1,\
- org.springframework.jms.config;version=4.0.5.RELEASE_1,\
- org.springframework.jms.connection;version=4.0.5.RELEASE_1,\
- org.springframework.jms.core;version=4.0.5.RELEASE_1,\
- org.springframework.jms.core.support;version=4.0.5.RELEASE_1,\
- org.springframework.jms.listener;version=4.0.5.RELEASE_1,\
- org.springframework.jms.listener.adapter;version=4.0.5.RELEASE_1,\
- org.springframework.jms.listener.endpoint;version=4.0.5.RELEASE_1,\
- org.springframework.jms.remoting;version=4.0.5.RELEASE_1,\
- org.springframework.jms.support;version=4.0.5.RELEASE_1,\
- org.springframework.jms.support.converter;version=4.0.5.RELEASE_1,\
- org.springframework.jms.support.destination;version=4.0.5.RELEASE_1,\
- org.springframework.jmx;version=4.0.5.RELEASE_1,\
- org.springframework.jmx.access;version=4.0.5.RELEASE_1,\
- org.springframework.jmx.export;version=4.0.5.RELEASE_1,\
- org.springframework.jmx.export.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.jmx.export.assembler;version=4.0.5.RELEASE_1,\
- org.springframework.jmx.export.metadata;version=4.0.5.RELEASE_1,\
- org.springframework.jmx.export.naming;version=4.0.5.RELEASE_1,\
- org.springframework.jmx.export.notification;version=4.0.5.RELEASE_1,\
- org.springframework.jmx.support;version=4.0.5.RELEASE_1,\
- org.springframework.jndi;version=4.0.5.RELEASE_1,\
- org.springframework.jndi.support;version=4.0.5.RELEASE_1,\
- org.springframework.mail;version=4.0.5.RELEASE_1,\
- org.springframework.mail.javamail;version=4.0.5.RELEASE_1,\
- org.springframework.mock;version=4.0.5.RELEASE_1,\
- org.springframework.mock.jndi;version=4.0.5.RELEASE_1,\
- org.springframework.mock.staticmock;version=4.0.5.RELEASE_1,\
- org.springframework.mock.web;version=4.0.5.RELEASE_1,\
- org.springframework.mock.web.portlet;version=4.0.5.RELEASE_1,\
- org.springframework.orm;version=4.0.5.RELEASE_1,\
- org.springframework.orm.hibernate3;version=4.0.5.RELEASE_1,\
- org.springframework.orm.hibernate3.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.orm.hibernate3.support;version=4.0.5.RELEASE_1,\
- org.springframework.orm.ibatis;version=4.0.5.RELEASE_1,\
- org.springframework.orm.ibatis.support;version=4.0.5.RELEASE_1,\
- org.springframework.orm.jdo;version=4.0.5.RELEASE_1,\
- org.springframework.orm.jdo.support;version=4.0.5.RELEASE_1,\
- org.springframework.orm.jpa;version=4.0.5.RELEASE_1,\
- org.springframework.orm.jpa.aspectj;version=4.0.5.RELEASE_1,\
- org.springframework.orm.jpa.persistenceunit;version=4.0.5.RELEASE_1,\
- org.springframework.orm.jpa.support;version=4.0.5.RELEASE_1,\
- org.springframework.orm.jpa.vendor;version=4.0.5.RELEASE_1,\
- org.springframework.oxm;version=4.0.5.RELEASE_1,\
- org.springframework.oxm.castor;version=4.0.5.RELEASE_1,\
- org.springframework.oxm.config;version=4.0.5.RELEASE_1,\
- org.springframework.oxm.jaxb;version=4.0.5.RELEASE_1,\
- org.springframework.oxm.jibx;version=4.0.5.RELEASE_1,\
- org.springframework.oxm.mime;version=4.0.5.RELEASE_1,\
- org.springframework.oxm.support;version=4.0.5.RELEASE_1,\
- org.springframework.oxm.xmlbeans;version=4.0.5.RELEASE_1,\
- org.springframework.oxm.xstream;version=4.0.5.RELEASE_1,\
- org.springframework.remoting;version=4.0.5.RELEASE_1,\
- org.springframework.remoting.caucho;version=4.0.5.RELEASE_1,\
- org.springframework.remoting.httpinvoker;version=4.0.5.RELEASE_1,\
- org.springframework.remoting.jaxrpc;version=4.0.5.RELEASE_1,\
- org.springframework.remoting.jaxws;version=4.0.5.RELEASE_1,\
- org.springframework.remoting.rmi;version=4.0.5.RELEASE_1,\
- org.springframework.remoting.soap;version=4.0.5.RELEASE_1,\
- org.springframework.remoting.support;version=4.0.5.RELEASE_1,\
- org.springframework.scheduling;version=4.0.5.RELEASE_1,\
- org.springframework.scheduling.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.scheduling.aspectj;version=4.0.5.RELEASE_1,\
- org.springframework.scheduling.backportconcurrent;version=4.0.5.RELEASE_1,\
- org.springframework.scheduling.commonj;version=4.0.5.RELEASE_1,\
- org.springframework.scheduling.concurrent;version=4.0.5.RELEASE_1,\
- org.springframework.scheduling.config;version=4.0.5.RELEASE_1,\
- org.springframework.scheduling.quartz;version=4.0.5.RELEASE_1,\
- org.springframework.scheduling.support;version=4.0.5.RELEASE_1,\
- org.springframework.scheduling.timer;version=4.0.5.RELEASE_1,\
- org.springframework.scripting;version=4.0.5.RELEASE_1,\
- org.springframework.scripting.bsh;version=4.0.5.RELEASE_1,\
- org.springframework.scripting.config;version=4.0.5.RELEASE_1,\
- org.springframework.scripting.groovy;version=4.0.5.RELEASE_1,\
- org.springframework.scripting.jruby;version=4.0.5.RELEASE_1,\
- org.springframework.scripting.support;version=4.0.5.RELEASE_1,\
+ org.snmp4j;version=2.4.3,\
+ org.snmp4j.asn1;version=2.4.3,\
+ org.snmp4j.event;version=2.4.3,\
+ org.snmp4j.log;version=2.4.3,\
+ org.snmp4j.mp;version=2.4.3,\
+ org.snmp4j.security;version=2.4.3,\
+ org.snmp4j.security.nonstandard;version=2.4.3,\
+ org.snmp4j.smi;version=2.4.3,\
+ org.snmp4j.test;version=2.4.3,\
+ org.snmp4j.tools;version=2.4.3,\
+ org.snmp4j.tools.console;version=2.4.3,\
+ org.snmp4j.transport;version=2.4.3,\
+ org.snmp4j.transport.ssh;version=2.4.3,\
+ org.snmp4j.transport.tls;version=2.4.3,\
+ org.snmp4j.uri;version=2.4.3,\
+ org.snmp4j.util;version=2.4.3,\
+ org.snmp4j.version;version=2.4.3,\
+ org.springframework;version=4.0.7.RELEASE_1,\
+ org.springframework.aop;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.aspectj;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.aspectj.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.aspectj.autoproxy;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.config;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.framework;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.framework.adapter;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.framework.autoproxy;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.framework.autoproxy.target;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.interceptor;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.scope;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.support;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.support.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.target;version=4.0.7.RELEASE_1,\
+ org.springframework.aop.target.dynamic;version=4.0.7.RELEASE_1,\
+ org.springframework.asm;version=4.0.7.RELEASE_1,\
+ org.springframework.asm.commons;version=4.0.7.RELEASE_1,\
+ org.springframework.asm.signature;version=4.0.7.RELEASE_1,\
+ org.springframework.beans;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory.access;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory.access.el;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory.aspectj;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory.config;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory.parsing;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory.serviceloader;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory.support;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory.wiring;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.factory.xml;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.propertyeditors;version=4.0.7.RELEASE_1,\
+ org.springframework.beans.support;version=4.0.7.RELEASE_1,\
+ org.springframework.cache;version=4.0.7.RELEASE_1,\
+ org.springframework.cache.ehcache;version=4.0.7.RELEASE_1,\
+ org.springframework.context;version=4.0.7.RELEASE_1,\
+ org.springframework.context.access;version=4.0.7.RELEASE_1,\
+ org.springframework.context.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.context.config;version=4.0.7.RELEASE_1,\
+ org.springframework.context.event;version=4.0.7.RELEASE_1,\
+ org.springframework.context.expression;version=4.0.7.RELEASE_1,\
+ org.springframework.context.i18n;version=4.0.7.RELEASE_1,\
+ org.springframework.context.support;version=4.0.7.RELEASE_1,\
+ org.springframework.context.weaving;version=4.0.7.RELEASE_1,\
+ org.springframework.core;version=4.0.7.RELEASE_1,\
+ org.springframework.core.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.core.convert;version=4.0.7.RELEASE_1,\
+ org.springframework.core.convert.converter;version=4.0.7.RELEASE_1,\
+ org.springframework.core.convert.support;version=4.0.7.RELEASE_1,\
+ org.springframework.core.enums;version=4.0.7.RELEASE_1,\
+ org.springframework.core.io;version=4.0.7.RELEASE_1,\
+ org.springframework.core.io.support;version=4.0.7.RELEASE_1,\
+ org.springframework.core.serializer;version=4.0.7.RELEASE_1,\
+ org.springframework.core.serializer.support;version=4.0.7.RELEASE_1,\
+ org.springframework.core.style;version=4.0.7.RELEASE_1,\
+ org.springframework.core.task;version=4.0.7.RELEASE_1,\
+ org.springframework.core.task.support;version=4.0.7.RELEASE_1,\
+ org.springframework.core.type;version=4.0.7.RELEASE_1,\
+ org.springframework.core.type.classreading;version=4.0.7.RELEASE_1,\
+ org.springframework.core.type.filter;version=4.0.7.RELEASE_1,\
+ org.springframework.dao;version=4.0.7.RELEASE_1,\
+ org.springframework.dao.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.dao.support;version=4.0.7.RELEASE_1,\
+ org.springframework.ejb;version=4.0.7.RELEASE_1,\
+ org.springframework.ejb.access;version=4.0.7.RELEASE_1,\
+ org.springframework.ejb.config;version=4.0.7.RELEASE_1,\
+ org.springframework.ejb.interceptor;version=4.0.7.RELEASE_1,\
+ org.springframework.ejb.support;version=4.0.7.RELEASE_1,\
+ org.springframework.expression;version=4.0.7.RELEASE_1,\
+ org.springframework.expression.common;version=4.0.7.RELEASE_1,\
+ org.springframework.expression.spel;version=4.0.7.RELEASE_1,\
+ org.springframework.expression.spel.ast;version=4.0.7.RELEASE_1,\
+ org.springframework.expression.spel.generated;version=4.0.7.RELEASE_1,\
+ org.springframework.expression.spel.standard;version=4.0.7.RELEASE_1,\
+ org.springframework.expression.spel.support;version=4.0.7.RELEASE_1,\
+ org.springframework.format;version=4.0.7.RELEASE_1,\
+ org.springframework.format.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.format.datetime;version=4.0.7.RELEASE_1,\
+ org.springframework.format.datetime.joda;version=4.0.7.RELEASE_1,\
+ org.springframework.format.number;version=4.0.7.RELEASE_1,\
+ org.springframework.format.support;version=4.0.7.RELEASE_1,\
+ org.springframework.http;version=4.0.7.RELEASE_1,\
+ org.springframework.http.client;version=4.0.7.RELEASE_1,\
+ org.springframework.http.client.support;version=4.0.7.RELEASE_1,\
+ org.springframework.http.converter;version=4.0.7.RELEASE_1,\
+ org.springframework.http.converter.feed;version=4.0.7.RELEASE_1,\
+ org.springframework.http.converter.json;version=4.0.7.RELEASE_1,\
+ org.springframework.http.converter.xml;version=4.0.7.RELEASE_1,\
+ org.springframework.http.server;version=4.0.7.RELEASE_1,\
+ org.springframework.instrument;version=4.0.7.RELEASE_1,\
+ org.springframework.instrument.classloading;version=4.0.7.RELEASE_1,\
+ org.springframework.instrument.classloading.glassfish;version=4.0.7.RELEASE_1,\
+ org.springframework.instrument.classloading.jboss;version=4.0.7.RELEASE_1,\
+ org.springframework.instrument.classloading.oc4j;version=4.0.7.RELEASE_1,\
+ org.springframework.instrument.classloading.weblogic;version=4.0.7.RELEASE_1,\
+ org.springframework.jca;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.cci;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.cci.connection;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.cci.core;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.cci.core.support;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.cci.object;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.context;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.endpoint;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.support;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.work;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.work.glassfish;version=4.0.7.RELEASE_1,\
+ org.springframework.jca.work.jboss;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.config;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.core;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.core.metadata;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.core.namedparam;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.core.simple;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.core.support;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.datasource;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.datasource.embedded;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.datasource.init;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.datasource.lookup;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.object;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.support;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.support.incrementer;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.support.lob;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.support.nativejdbc;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.support.rowset;version=4.0.7.RELEASE_1,\
+ org.springframework.jdbc.support.xml;version=4.0.7.RELEASE_1,\
+ org.springframework.jms;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.config;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.connection;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.core;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.core.support;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.listener;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.listener.adapter;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.listener.endpoint;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.remoting;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.support;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.support.converter;version=4.0.7.RELEASE_1,\
+ org.springframework.jms.support.destination;version=4.0.7.RELEASE_1,\
+ org.springframework.jmx;version=4.0.7.RELEASE_1,\
+ org.springframework.jmx.access;version=4.0.7.RELEASE_1,\
+ org.springframework.jmx.export;version=4.0.7.RELEASE_1,\
+ org.springframework.jmx.export.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.jmx.export.assembler;version=4.0.7.RELEASE_1,\
+ org.springframework.jmx.export.metadata;version=4.0.7.RELEASE_1,\
+ org.springframework.jmx.export.naming;version=4.0.7.RELEASE_1,\
+ org.springframework.jmx.export.notification;version=4.0.7.RELEASE_1,\
+ org.springframework.jmx.support;version=4.0.7.RELEASE_1,\
+ org.springframework.jndi;version=4.0.7.RELEASE_1,\
+ org.springframework.jndi.support;version=4.0.7.RELEASE_1,\
+ org.springframework.mail;version=4.0.7.RELEASE_1,\
+ org.springframework.mail.javamail;version=4.0.7.RELEASE_1,\
+ org.springframework.mock;version=4.0.7.RELEASE_1,\
+ org.springframework.mock.jndi;version=4.0.7.RELEASE_1,\
+ org.springframework.mock.staticmock;version=4.0.7.RELEASE_1,\
+ org.springframework.mock.web;version=4.0.7.RELEASE_1,\
+ org.springframework.mock.web.portlet;version=4.0.7.RELEASE_1,\
+ org.springframework.orm;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.hibernate3;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.hibernate3.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.hibernate3.support;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.ibatis;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.ibatis.support;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.jdo;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.jdo.support;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.jpa;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.jpa.aspectj;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.jpa.persistenceunit;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.jpa.support;version=4.0.7.RELEASE_1,\
+ org.springframework.orm.jpa.vendor;version=4.0.7.RELEASE_1,\
+ org.springframework.oxm;version=4.0.7.RELEASE_1,\
+ org.springframework.oxm.castor;version=4.0.7.RELEASE_1,\
+ org.springframework.oxm.config;version=4.0.7.RELEASE_1,\
+ org.springframework.oxm.jaxb;version=4.0.7.RELEASE_1,\
+ org.springframework.oxm.jibx;version=4.0.7.RELEASE_1,\
+ org.springframework.oxm.mime;version=4.0.7.RELEASE_1,\
+ org.springframework.oxm.support;version=4.0.7.RELEASE_1,\
+ org.springframework.oxm.xmlbeans;version=4.0.7.RELEASE_1,\
+ org.springframework.oxm.xstream;version=4.0.7.RELEASE_1,\
+ org.springframework.remoting;version=4.0.7.RELEASE_1,\
+ org.springframework.remoting.caucho;version=4.0.7.RELEASE_1,\
+ org.springframework.remoting.httpinvoker;version=4.0.7.RELEASE_1,\
+ org.springframework.remoting.jaxrpc;version=4.0.7.RELEASE_1,\
+ org.springframework.remoting.jaxws;version=4.0.7.RELEASE_1,\
+ org.springframework.remoting.rmi;version=4.0.7.RELEASE_1,\
+ org.springframework.remoting.soap;version=4.0.7.RELEASE_1,\
+ org.springframework.remoting.support;version=4.0.7.RELEASE_1,\
+ org.springframework.scheduling;version=4.0.7.RELEASE_1,\
+ org.springframework.scheduling.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.scheduling.aspectj;version=4.0.7.RELEASE_1,\
+ org.springframework.scheduling.backportconcurrent;version=4.0.7.RELEASE_1,\
+ org.springframework.scheduling.commonj;version=4.0.7.RELEASE_1,\
+ org.springframework.scheduling.concurrent;version=4.0.7.RELEASE_1,\
+ org.springframework.scheduling.config;version=4.0.7.RELEASE_1,\
+ org.springframework.scheduling.quartz;version=4.0.7.RELEASE_1,\
+ org.springframework.scheduling.support;version=4.0.7.RELEASE_1,\
+ org.springframework.scheduling.timer;version=4.0.7.RELEASE_1,\
+ org.springframework.scripting;version=4.0.7.RELEASE_1,\
+ org.springframework.scripting.bsh;version=4.0.7.RELEASE_1,\
+ org.springframework.scripting.config;version=4.0.7.RELEASE_1,\
+ org.springframework.scripting.groovy;version=4.0.7.RELEASE_1,\
+ org.springframework.scripting.jruby;version=4.0.7.RELEASE_1,\
+ org.springframework.scripting.support;version=4.0.7.RELEASE_1,\
org.springframework.security.core;version=3.2.7.RELEASE,\
org.springframework.security.core.context;version=3.2.7.RELEASE,\
- org.springframework.stereotype;version=4.0.5.RELEASE_1,\
- org.springframework.test;version=4.0.5.RELEASE_1,\
- org.springframework.test.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.test.context;version=4.0.5.RELEASE_1,\
- org.springframework.test.context.junit38;version=4.0.5.RELEASE_1,\
- org.springframework.test.context.junit4;version=4.0.5.RELEASE_1,\
- org.springframework.test.context.junit4.statements;version=4.0.5.RELEASE_1,\
- org.springframework.test.context.support;version=4.0.5.RELEASE_1,\
- org.springframework.test.context.testng;version=4.0.5.RELEASE_1,\
- org.springframework.test.context.transaction;version=4.0.5.RELEASE_1,\
- org.springframework.test.jdbc;version=4.0.5.RELEASE_1,\
- org.springframework.test.jpa;version=4.0.5.RELEASE_1,\
- org.springframework.test.util;version=4.0.5.RELEASE_1,\
- org.springframework.test.web;version=4.0.5.RELEASE_1,\
- org.springframework.transaction;version=4.0.5.RELEASE_1,\
- org.springframework.transaction.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.transaction.aspectj;version=4.0.5.RELEASE_1,\
- org.springframework.transaction.config;version=4.0.5.RELEASE_1,\
- org.springframework.transaction.interceptor;version=4.0.5.RELEASE_1,\
- org.springframework.transaction.jta;version=4.0.5.RELEASE_1,\
- org.springframework.transaction.support;version=4.0.5.RELEASE_1,\
- org.springframework.ui;version=4.0.5.RELEASE_1,\
- org.springframework.ui.context;version=4.0.5.RELEASE_1,\
- org.springframework.ui.context.support;version=4.0.5.RELEASE_1,\
- org.springframework.ui.freemarker;version=4.0.5.RELEASE_1,\
- org.springframework.ui.jasperreports;version=4.0.5.RELEASE_1,\
- org.springframework.ui.velocity;version=4.0.5.RELEASE_1,\
- org.springframework.util;version=4.0.5.RELEASE_1,\
- org.springframework.util.comparator;version=4.0.5.RELEASE_1,\
- org.springframework.util.xml;version=4.0.5.RELEASE_1,\
- org.springframework.validation;version=4.0.5.RELEASE_1,\
- org.springframework.validation.beanvalidation;version=4.0.5.RELEASE_1,\
- org.springframework.validation.support;version=4.0.5.RELEASE_1,\
- org.springframework.web;version=4.0.5.RELEASE_1,\
- org.springframework.web.bind;version=4.0.5.RELEASE_1,\
- org.springframework.web.bind.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.web.bind.annotation.support;version=4.0.5.RELEASE_1,\
- org.springframework.web.bind.support;version=4.0.5.RELEASE_1,\
- org.springframework.web.client;version=4.0.5.RELEASE_1,\
- org.springframework.web.client.support;version=4.0.5.RELEASE_1,\
- org.springframework.web.context;version=4.0.5.RELEASE_1,\
- org.springframework.web.context.request;version=4.0.5.RELEASE_1,\
- org.springframework.web.context.support;version=4.0.5.RELEASE_1,\
- org.springframework.web.filter;version=4.0.5.RELEASE_1,\
- org.springframework.web.jsf;version=4.0.5.RELEASE_1,\
- org.springframework.web.jsf.el;version=4.0.5.RELEASE_1,\
- org.springframework.web.multipart;version=4.0.5.RELEASE_1,\
- org.springframework.web.multipart.commons;version=4.0.5.RELEASE_1,\
- org.springframework.web.multipart.support;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.config;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.handler;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.i18n;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.mvc;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.mvc.annotation;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.mvc.multiaction;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.mvc.support;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.resource;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.support;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.tags;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.tags.form;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.theme;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view.document;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view.feed;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view.freemarker;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view.jasperreports;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view.json;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view.tiles;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view.tiles2;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view.velocity;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view.xml;version=4.0.5.RELEASE_1,\
- org.springframework.web.servlet.view.xslt;version=4.0.5.RELEASE_1,\
- org.springframework.web.struts;version=4.0.5.RELEASE_1,\
- org.springframework.web.util;version=4.0.5.RELEASE_1,\
+ org.springframework.stereotype;version=4.0.7.RELEASE_1,\
+ org.springframework.test;version=4.0.7.RELEASE_1,\
+ org.springframework.test.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.test.context;version=4.0.7.RELEASE_1,\
+ org.springframework.test.context.junit38;version=4.0.7.RELEASE_1,\
+ org.springframework.test.context.junit4;version=4.0.7.RELEASE_1,\
+ org.springframework.test.context.junit4.statements;version=4.0.7.RELEASE_1,\
+ org.springframework.test.context.support;version=4.0.7.RELEASE_1,\
+ org.springframework.test.context.testng;version=4.0.7.RELEASE_1,\
+ org.springframework.test.context.transaction;version=4.0.7.RELEASE_1,\
+ org.springframework.test.jdbc;version=4.0.7.RELEASE_1,\
+ org.springframework.test.jpa;version=4.0.7.RELEASE_1,\
+ org.springframework.test.util;version=4.0.7.RELEASE_1,\
+ org.springframework.test.web;version=4.0.7.RELEASE_1,\
+ org.springframework.transaction;version=4.0.7.RELEASE_1,\
+ org.springframework.transaction.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.transaction.aspectj;version=4.0.7.RELEASE_1,\
+ org.springframework.transaction.config;version=4.0.7.RELEASE_1,\
+ org.springframework.transaction.interceptor;version=4.0.7.RELEASE_1,\
+ org.springframework.transaction.jta;version=4.0.7.RELEASE_1,\
+ org.springframework.transaction.support;version=4.0.7.RELEASE_1,\
+ org.springframework.ui;version=4.0.7.RELEASE_1,\
+ org.springframework.ui.context;version=4.0.7.RELEASE_1,\
+ org.springframework.ui.context.support;version=4.0.7.RELEASE_1,\
+ org.springframework.ui.freemarker;version=4.0.7.RELEASE_1,\
+ org.springframework.ui.jasperreports;version=4.0.7.RELEASE_1,\
+ org.springframework.ui.velocity;version=4.0.7.RELEASE_1,\
+ org.springframework.util;version=4.0.7.RELEASE_1,\
+ org.springframework.util.comparator;version=4.0.7.RELEASE_1,\
+ org.springframework.util.xml;version=4.0.7.RELEASE_1,\
+ org.springframework.validation;version=4.0.7.RELEASE_1,\
+ org.springframework.validation.beanvalidation;version=4.0.7.RELEASE_1,\
+ org.springframework.validation.support;version=4.0.7.RELEASE_1,\
+ org.springframework.web;version=4.0.7.RELEASE_1,\
+ org.springframework.web.bind;version=4.0.7.RELEASE_1,\
+ org.springframework.web.bind.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.web.bind.annotation.support;version=4.0.7.RELEASE_1,\
+ org.springframework.web.bind.support;version=4.0.7.RELEASE_1,\
+ org.springframework.web.client;version=4.0.7.RELEASE_1,\
+ org.springframework.web.client.support;version=4.0.7.RELEASE_1,\
+ org.springframework.web.context;version=4.0.7.RELEASE_1,\
+ org.springframework.web.context.request;version=4.0.7.RELEASE_1,\
+ org.springframework.web.context.support;version=4.0.7.RELEASE_1,\
+ org.springframework.web.filter;version=4.0.7.RELEASE_1,\
+ org.springframework.web.jsf;version=4.0.7.RELEASE_1,\
+ org.springframework.web.jsf.el;version=4.0.7.RELEASE_1,\
+ org.springframework.web.multipart;version=4.0.7.RELEASE_1,\
+ org.springframework.web.multipart.commons;version=4.0.7.RELEASE_1,\
+ org.springframework.web.multipart.support;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.config;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.handler;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.i18n;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.mvc;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.mvc.annotation;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.mvc.multiaction;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.mvc.support;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.resource;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.support;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.tags;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.tags.form;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.theme;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view.document;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view.feed;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view.freemarker;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view.jasperreports;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view.json;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view.tiles;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view.tiles2;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view.velocity;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view.xml;version=4.0.7.RELEASE_1,\
+ org.springframework.web.servlet.view.xslt;version=4.0.7.RELEASE_1,\
+ org.springframework.web.struts;version=4.0.7.RELEASE_1,\
+ org.springframework.web.util;version=4.0.7.RELEASE_1,\
org.springframework.binding.collection;version=2.3.4.RELEASE,\
org.springframework.binding.convert;version=2.3.4.RELEASE,\
org.springframework.binding.convert.converters;version=2.3.4.RELEASE,\
@@ -650,128 +661,129 @@ org.osgi.framework.system.packages.extra=org.apache.karaf.branding,\
org.springframework.webflow.test.execution;version=2.3.4.RELEASE,\
org.springframework.webflow.upgrade;version=2.3.4.RELEASE,\
org.springframework.webflow.validation;version=2.3.4.RELEASE,\
- org.opennms.netmgt;version=18.0.4,\
- org.opennms.netmgt.charts;version=18.0.4,\
- org.opennms.netmgt.collectd;version=18.0.4,\
- org.opennms.netmgt.collection.support;version=18.0.4,\
- org.opennms.netmgt.collection.support.builder;version=18.0.4,\
- org.opennms.netmgt.collection.api;version=18.0.4,\
- org.opennms.netmgt.config;version=18.0.4,\
- org.opennms.netmgt.config.api;version=18.0.4,\
- org.opennms.netmgt.config.categories;version=18.0.4,\
- org.opennms.netmgt.config.charts;version=18.0.4,\
- org.opennms.netmgt.config.datacollection;version=18.0.4,\
- org.opennms.netmgt.config.groups;version=18.0.4,\
- org.opennms.netmgt.config.kscReports;version=18.0.4,\
- org.opennms.netmgt.config.opennmsDataSources;version=18.0.4,\
- org.opennms.netmgt.config.siteStatusViews;version=18.0.4,\
- org.opennms.netmgt.config.surveillanceViews;version=18.0.4,\
- org.opennms.netmgt.config.users;version=18.0.4,\
- org.opennms.netmgt.config.viewsdisplay;version=18.0.4,\
- org.opennms.netmgt.config.webuiColors;version=18.0.4,\
- org.opennms.api.integration.ticketing;version=18.0.4,\
- org.opennms.api.reporting;version=18.0.4,\
- org.opennms.api.reporting.parameter;version=18.0.4,\
- org.opennms.container.web;version=18.0.4,\
- org.opennms.core.config.api;version=18.0.4,\
- org.opennms.core.db;version=18.0.4,\
- org.opennms.core.network;version=18.0.4,\
- org.opennms.core.criteria;version=18.0.4,\
- org.opennms.core.criteria.restrictions;version=18.0.4,\
- org.opennms.core.utils;version=18.0.4,\
- org.opennms.core.logging;version=18.0.4,\
- org.opennms.core.soa;version=18.0.4,\
- org.opennms.core.soa.filter;version=18.0.4,\
- org.opennms.core.soa.config;version=18.0.4,\
- org.opennms.core.soa.support;version=18.0.4,\
- org.opennms.core.spring;version=18.0.4,\
- org.opennms.core.xml;version=18.0.4,\
- org.opennms.core.concurrent;version=18.0.4,\
- org.opennms.core.fiber;version=18.0.4,\
- org.opennms.core.queue;version=18.0.4,\
- org.opennms.core.resource;version=18.0.4,\
- org.opennms.core.resource.db;version=18.0.4,\
- org.opennms.core.utils.url;version=18.0.4,\
- org.opennms.features.reporting.model;version=18.0.4,\
- org.opennms.features.reporting.model.basicreport;version=18.0.4,\
- org.opennms.features.reporting.model.jasper;version=18.0.4,\
- org.opennms.features.reporting.model.jasperreport;version=18.0.4,\
- org.opennms.features.reporting.model.remoterepository;version=18.0.4,\
- org.opennms.features.reporting.repository;version=18.0.4,\
- org.opennms.features.reporting.repository.global;version=18.0.4,\
- org.opennms.features.reporting.repository.local;version=18.0.4,\
- org.opennms.features.reporting.repository.remote;version=18.0.4,\
- org.opennms.netmgt.bsm.service;version=18.0.4,\
- org.opennms.netmgt.bsm.service.model;version=18.0.4,\
- org.opennms.netmgt.bsm.service.model.edge;version=18.0.4,\
- org.opennms.netmgt.bsm.service.model.edge.ro;version=18.0.4,\
- org.opennms.netmgt.bsm.service.model.functions.map;version=18.0.4,\
- org.opennms.netmgt.bsm.service.model.functions.reduce;version=18.0.4,\
- org.opennms.netmgt.bsm.service.model.graph;version=18.0.4,\
- org.opennms.netmgt.bsm.persistence.api;version=18.0.4,\
- org.opennms.netmgt.bsm.service.internal;version=18.0.4,\
- org.opennms.netmgt.dao;version=18.0.4,\
- org.opennms.netmgt.dao.api;version=18.0.4,\
- org.opennms.netmgt.dao.support;version=18.0.4,\
- org.opennms.netmgt.events.api;version=18.0.4,\
- org.opennms.netmgt.events.api.annotations;version=18.0.4,\
- org.opennms.netmgt.events.api.support;version=18.0.4,\
- org.opennms.netmgt.jmx.connection;version=18.0.4,\
- org.opennms.netmgt.jmx.impl.connection.connectors;version=18.0.4,\
- org.opennms.netmgt.model;version=18.0.4,\
- org.opennms.netmgt.model.alarm;version=18.0.4,\
- org.opennms.netmgt.alarmd.api;version=18.0.4,\
- org.opennms.netmgt.alarmd.api.support;version=18.0.4,\
- org.opennms.netmgt.model.capsd;version=18.0.4,\
- org.opennms.netmgt.model.discovery;version=18.0.4,\
- org.opennms.netmgt.model.events;version=18.0.4,\
- org.opennms.netmgt.model.minion;version=18.0.4,\
- org.opennms.netmgt.model.ncs;version=18.0.4,\
- org.opennms.netmgt.model.notifd;version=18.0.4,\
- org.opennms.netmgt.model.outage;version=18.0.4,\
- org.opennms.netmgt.model.topology;version=18.0.4,\
- org.opennms.netmgt.poller;version=18.0.4,\
- org.opennms.netmgt.provision.persist;version=18.0.4,\
- org.opennms.netmgt.provision.persist.foreignsource;version=18.0.4,\
- org.opennms.netmgt.provision.persist.policies;version=18.0.4,\
- org.opennms.netmgt.provision.persist.requisition;version=18.0.4,\
- org.opennms.netmgt.rrd;version=18.0.4,\
- org.opennms.netmgt.snmp;version=18.0.4,\
- org.opennms.netmgt.syslogd;version=18.0.4,\
- org.opennms.netmgt.ticketd;version=18.0.4,\
- org.opennms.netmgt.xml.event;version=18.0.4,\
- org.opennms.netmgt.xml.eventconf;version=18.0.4,\
- org.opennms.netmgt.xml.rtc;version=18.0.4,\
- org.opennms.reporting.core;version=18.0.4,\
- org.opennms.reporting.core.svclayer;version=18.0.4,\
- org.opennms.reporting.core.svclayer.support;version=18.0.4,\
- org.opennms.web.api;version=18.0.4,\
- org.opennms.web.category;version=18.0.4,\
- org.opennms.web.charts;version=18.0.4,\
- org.opennms.web.navigate;version=18.0.4,\
- org.opennms.web.servlet;version=18.0.4,\
- org.opennms.web.svclayer;version=18.0.4,\
- org.opennms.web.svclayer.api;version=18.0.4,\
- org.opennms.web.svclayer.dao;version=18.0.4,\
- org.opennms.web.svclayer.dao.support;version=18.0.4,\
- org.opennms.web.svclayer.model;version=18.0.4,\
- org.opennms.web.svclayer.support;version=18.0.4,\
- org.opennms.web.springframework.security;version=18.0.4,\
- org.opennms.netmgt.icmp;version=18.0.4
+ org.opennms.netmgt;version=19.0.0,\
+ org.opennms.netmgt.charts;version=19.0.0,\
+ org.opennms.netmgt.collectd;version=19.0.0,\
+ org.opennms.netmgt.collection.support;version=19.0.0,\
+ org.opennms.netmgt.collection.support.builder;version=19.0.0,\
+ org.opennms.netmgt.collection.api;version=19.0.0,\
+ org.opennms.netmgt.config;version=19.0.0,\
+ org.opennms.netmgt.config.api;version=19.0.0,\
+ org.opennms.netmgt.config.categories;version=19.0.0,\
+ org.opennms.netmgt.config.charts;version=19.0.0,\
+ org.opennms.netmgt.config.collectd;version=19.0.0,\
+ org.opennms.netmgt.config.datacollection;version=19.0.0,\
+ org.opennms.netmgt.config.groups;version=19.0.0,\
+ org.opennms.netmgt.config.kscReports;version=19.0.0,\
+ org.opennms.netmgt.config.opennmsDataSources;version=19.0.0,\
+ org.opennms.netmgt.config.siteStatusViews;version=19.0.0,\
+ org.opennms.netmgt.config.surveillanceViews;version=19.0.0,\
+ org.opennms.netmgt.config.trapd;version=19.0.0,\
+ org.opennms.netmgt.config.users;version=19.0.0,\
+ org.opennms.netmgt.config.viewsdisplay;version=19.0.0,\
+ org.opennms.netmgt.config.webuiColors;version=19.0.0,\
+ org.opennms.api.integration.ticketing;version=19.0.0,\
+ org.opennms.api.reporting;version=19.0.0,\
+ org.opennms.api.reporting.parameter;version=19.0.0,\
+ org.opennms.container.web;version=19.0.0,\
+ org.opennms.core.config.api;version=19.0.0,\
+ org.opennms.core.db;version=19.0.0,\
+ org.opennms.core.network;version=19.0.0,\
+ org.opennms.core.criteria;version=19.0.0,\
+ org.opennms.core.criteria.restrictions;version=19.0.0,\
+ org.opennms.core.utils;version=19.0.0,\
+ org.opennms.core.logging;version=19.0.0,\
+ org.opennms.core.soa;version=19.0.0,\
+ org.opennms.core.soa.filter;version=19.0.0,\
+ org.opennms.core.soa.config;version=19.0.0,\
+ org.opennms.core.soa.support;version=19.0.0,\
+ org.opennms.core.spring;version=19.0.0,\
+ org.opennms.core.xml;version=19.0.0,\
+ org.opennms.core.concurrent;version=19.0.0,\
+ org.opennms.core.fiber;version=19.0.0,\
+ org.opennms.core.queue;version=19.0.0,\
+ org.opennms.core.resource;version=19.0.0,\
+ org.opennms.core.resource.db;version=19.0.0,\
+ org.opennms.core.rpc.api;version=19.0.0,\
+ org.opennms.core.utils.url;version=19.0.0,\
+ org.opennms.features.geolocation.api;version=19.0.0,\
+ org.opennms.features.reporting.model;version=19.0.0,\
+ org.opennms.features.reporting.model.basicreport;version=19.0.0,\
+ org.opennms.features.reporting.model.jasper;version=19.0.0,\
+ org.opennms.features.reporting.model.jasperreport;version=19.0.0,\
+ org.opennms.features.reporting.model.remoterepository;version=19.0.0,\
+ org.opennms.features.reporting.repository;version=19.0.0,\
+ org.opennms.features.reporting.repository.global;version=19.0.0,\
+ org.opennms.features.reporting.repository.local;version=19.0.0,\
+ org.opennms.features.reporting.repository.remote;version=19.0.0,\
+ org.opennms.netmgt.bsm.service;version=19.0.0,\
+ org.opennms.netmgt.bsm.service.model;version=19.0.0,\
+ org.opennms.netmgt.bsm.service.model.edge;version=19.0.0,\
+ org.opennms.netmgt.bsm.service.model.edge.ro;version=19.0.0,\
+ org.opennms.netmgt.bsm.service.model.functions.map;version=19.0.0,\
+ org.opennms.netmgt.bsm.service.model.functions.reduce;version=19.0.0,\
+ org.opennms.netmgt.bsm.service.model.graph;version=19.0.0,\
+ org.opennms.netmgt.bsm.persistence.api;version=19.0.0,\
+ org.opennms.netmgt.bsm.service.internal;version=19.0.0,\
+ org.opennms.netmgt.dao;version=19.0.0,\
+ org.opennms.netmgt.dao.api;version=19.0.0,\
+ org.opennms.netmgt.dao.support;version=19.0.0,\
+ org.opennms.netmgt.events.api;version=19.0.0,\
+ org.opennms.netmgt.events.api.annotations;version=19.0.0,\
+ org.opennms.netmgt.events.api.support;version=19.0.0,\
+ org.opennms.netmgt.jmx.connection;version=19.0.0,\
+ org.opennms.netmgt.jmx.impl.connection.connectors;version=19.0.0,\
+ org.opennms.netmgt.model;version=19.0.0,\
+ org.opennms.netmgt.model.alarm;version=19.0.0,\
+ org.opennms.netmgt.alarmd.api;version=19.0.0,\
+ org.opennms.netmgt.alarmd.api.support;version=19.0.0,\
+ org.opennms.netmgt.measurements.api;version=19.0.0,\
+ org.opennms.netmgt.measurements.api.exceptions;version=19.0.0,\
+ org.opennms.netmgt.measurements.model;version=19.0.0,\
+ org.opennms.netmgt.model.capsd;version=19.0.0,\
+ org.opennms.netmgt.model.discovery;version=19.0.0,\
+ org.opennms.netmgt.model.events;version=19.0.0,\
+ org.opennms.netmgt.model.minion;version=19.0.0,\
+ org.opennms.netmgt.model.monitoringLocations;version=19.0.0,\
+ org.opennms.netmgt.model.ncs;version=19.0.0,\
+ org.opennms.netmgt.model.notifd;version=19.0.0,\
+ org.opennms.netmgt.model.outage;version=19.0.0,\
+ org.opennms.netmgt.model.topology;version=19.0.0,\
+ org.opennms.netmgt.poller;version=19.0.0,\
+ org.opennms.netmgt.poller.support;version=19.0.0,\
+ org.opennms.netmgt.provision;version=19.0.0,\
+ org.opennms.netmgt.provision.detector.registry.api;version=19.0.0,\
+ org.opennms.netmgt.provision.persist;version=19.0.0,\
+ org.opennms.netmgt.provision.persist.foreignsource;version=19.0.0,\
+ org.opennms.netmgt.provision.persist.policies;version=19.0.0,\
+ org.opennms.netmgt.provision.persist.requisition;version=19.0.0,\
+ org.opennms.netmgt.rrd;version=19.0.0,\
+ org.opennms.netmgt.snmp;version=19.0.0,\
+ org.opennms.netmgt.snmp.snmp4j;version=19.0.0,\
+ org.opennms.netmgt.snmp.proxy;version=19.0.0,\
+ org.opennms.netmgt.syslogd;version=19.0.0,\
+ org.opennms.netmgt.ticketd;version=19.0.0,\
+ org.opennms.netmgt.topology.persistence.api;version=19.0.0,\
+ org.opennms.netmgt.trapd;version=19.0.0,\
+ org.opennms.netmgt.xml.event;version=19.0.0,\
+ org.opennms.netmgt.xml.eventconf;version=19.0.0,\
+ org.opennms.netmgt.xml.rtc;version=19.0.0,\
+ org.opennms.reporting.core;version=19.0.0,\
+ org.opennms.reporting.core.svclayer;version=19.0.0,\
+ org.opennms.reporting.core.svclayer.support;version=19.0.0,\
+ org.opennms.web.api;version=19.0.0,\
+ org.opennms.web.category;version=19.0.0,\
+ org.opennms.web.charts;version=19.0.0,\
+ org.opennms.web.navigate;version=19.0.0,\
+ org.opennms.web.servlet;version=19.0.0,\
+ org.opennms.web.springframework;version=19.0.0,\
+ org.opennms.web.springframework.security;version=19.0.0,\
+ org.opennms.web.svclayer;version=19.0.0,\
+ org.opennms.web.svclayer.api;version=19.0.0,\
+ org.opennms.web.svclayer.dao;version=19.0.0,\
+ org.opennms.web.svclayer.dao.support;version=19.0.0,\
+ org.opennms.web.svclayer.model;version=19.0.0,\
+ org.opennms.web.svclayer.support;version=19.0.0,\
+ org.opennms.netmgt.icmp;version=19.0.0,\
+ org.opennms.netmgt.icmp.proxy;version=19.0.0
-
-
-# org.snmp4j;version=1.11.1,\
-# org.snmp4j.asn1;version=1.11.1,\
-# org.snmp4j.doc-files;version=1.11.1,\
-# org.snmp4j.event;version=1.11.1,\
-# org.snmp4j.log;version=1.11.1,\
-# org.snmp4j.mp;version=1.11.1,\
-# org.snmp4j.security;version=1.11.1,\
-# org.snmp4j.smi;version=1.11.1,\
-# org.snmp4j.smi;version=1.11.1,\
-# org.snmp4j.test;version=1.11.1,\
-# org.snmp4j.tools.console;version=1.11.1,\
-# org.snmp4j.transport;version=1.11.1,\
-# org.snmp4j.util;version=1.11.1,\
-# org.snmp4j.version;version=1.11.1
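
The hunk above rewrites the org.osgi.framework.system.packages.extra list wholesale: the Spring packages move from 4.0.5.RELEASE_1 to 4.0.7.RELEASE_1, the OpenNMS packages from 18.0.4 to 19.0.0, and the long-commented snmp4j block is dropped entirely. For context, a minimal sketch of how this Karaf property is extended, assuming the patched file is the container's custom.properties; com.example.custom is an invented package name for illustration:

    # Each extra package the OSGi system bundle should export is appended to
    # the same backslash-continued list (com.example.custom is hypothetical):
    org.osgi.framework.system.packages.extra=org.apache.karaf.branding,\
        com.example.custom;version=1.0.0
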
diff --git a/database-reports.xml b/database-reports.xml
index 38cfa64..e3b9094 100644
--- a/database-reports.xml
+++ b/database-reports.xml
@@ -7,35 +7,40 @@
report-service="availabilityReportService" description="standard opennms report in calendar format" />
<report id="defaultClassicReport" display-name="Default classic report"
report-service="availabilityReportService" description="standard opennms report in tabular format" />
- <report id="Early-Morning-Report" display-name="Early morning report" online="true"
+ <report id="Early-Morning-Report" display-name="Early morning report" online="true"
report-service="jasperReportService" description="Global overview of outages, notifications and events in last 24 hours" />
- <report id="Response-Time-Summary-Report" display-name="Response Time Summary for node" online="true"
+ <report id="Response-Time-Summary-Report" display-name="Response Time Summary for node" online="true"
report-service="jasperReportService" description="Response Time by node across one or more surveillance categories. Note: % can be used as a place holder for any string literal" />
- <report id="Node-Availability-Report" display-name="Availability by node" online="true"
+ <report id="Node-Availability-Report" display-name="Availability by node" online="true"
report-service="jasperReportService" description="Availability by node across one or more surveillance categories. Note: % can be used as a place holder for any string literal" />
- <report id="Availability-Summary-Report" display-name="Availability Summary -Default configuration for past 7 Days" online="true"
+ <report id="Availability-Summary-Report" display-name="Availability Summary -Default configuration for past 7 Days" online="true"
report-service="jasperReportService" description="Availability summary across one or more surveillance categories. Note: % can be used as a place holder for any string literal" />
- <report id="Response-Time-Report" display-name="Response time by node" online="true"
+ <report id="Response-Time-Report" display-name="Response time by node" online="true"
report-service="jasperReportService" description="Response time by node across one or more surveillance categories. Note: % can be used as a place holder for any string literal" />
- <report id="Serial-Interface-Utilization-Summary" display-name="Serial Interface Utilization Summary" online="true"
+ <report id="Serial-Interface-Utilization-Summary" display-name="Serial Interface Utilization Summary" online="true"
report-service="jasperReportService" description="Serial Interface Utilization Summary" />
- <report id="Total-Bytes-Transferred-By-Interface" display-name="Total Bytes Transferred by Interface " online="true"
+ <report id="Total-Bytes-Transferred-By-Interface" display-name="Total Bytes Transferred by Interface " online="true"
report-service="jasperReportService" description="Total Bytes Transferred by Interface" />
- <report id="Average-Peak-Traffic-Rates" display-name="Average and Peak Traffic rates for Nodes by Interface" online="true"
+ <report id="Average-Peak-Traffic-Rates" display-name="Average and Peak Traffic rates for Nodes by Interface" online="true"
report-service="jasperReportService" description="Average and Peak Traffic rates for Nodes by Interface" />
- <report id="Interface-Availability-Report" display-name="Interface Availability Report" online="true"
+ <report id="Interface-Availability-Report" display-name="Interface Availability Report" online="true"
report-service="jasperReportService" description="Interface Availability Report, show interface availability for interfaces with outages within time range" />
- <report id="Snmp-Interface-Oper-Availability" display-name="Snmp Interface Availability Report" online="true"
+ <report id="Snmp-Interface-Oper-Availability" display-name="Snmp Interface Availability Report" online="true"
report-service="jasperReportService" description="Snmp Interface Availability Report, shows availability for snmp interfaces with interfaceOperDown outages within the time range" />
- <report id="AssetMangementMaintExpired" display-name="Maintenance contracts expired" online="true"
+ <report id="AssetMangementMaintExpired" display-name="Maintenance contracts expired" online="true"
report-service="jasperReportService" description="Asset management report shows all maintenance contracts expired." />
- <report id="AssetMangementMaintStrategy" display-name="Maintenance contracts strategy" online="true"
+ <report id="AssetMangementMaintStrategy" display-name="Maintenance contracts strategy" online="true"
report-service="jasperReportService" description="Asset management report focused on maintenance strategy.Forecast and overview for 12 month and informations about age of nodes and maintenance contracts" />
- <report id="Event-Analysis" display-name="Event Analysis report" online="true"
+ <report id="Event-Analysis" display-name="Event Analysis report" online="true"
report-service="jasperReportService" description="Analyse events based on events source and quantity by nodes." />
<!-- The following report expects that storeByGroup is enabled -->
<!--
- <report id="DiskUsageForCTX" display-name="C: Disk Usage for CTX servers" online="true"
+ <report id="DiskUsageForCTX" display-name="C: Disk Usage for CTX servers" online="true"
report-service="jasperReportService" description="C: Disk Usage for CTX servers" />
-->
+<!-- The following report expects that storeByForeignSource is enabled -->
+<!--
+ <report id="Top20-IOWait" display-name="TOP 20 nodes by I/O Wait" online="true"
+ report-service="jasperReportService" description="TOP 20 nodes by I/O Wait" />
+-->
</database-reports>
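
The new Top20-IOWait report above ships commented out because, per the accompanying comment, it only works when storeByForeignSource is enabled. A sketch of the uncommented entry (identical to the shipped block; the matching template registration appears in the jasper-reports.xml hunk further down):

    <report id="Top20-IOWait" display-name="TOP 20 nodes by I/O Wait" online="true"
      report-service="jasperReportService" description="TOP 20 nodes by I/O Wait" />
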
diff --git a/database-schema.xml b/database-schema.xml
index f6994b6..6616dca 100644
--- a/database-schema.xml
+++ b/database-schema.xml
@@ -1,20 +1,10 @@
<?xml version="1.0"?>
<database-schema>
- <table name="distPoller" visible="false">
- <column name="dpNumber"/>
- <column name="dpName"/>
- <column name="dpIP"/>
- <column name="dpComment"/>
- <column name="dpDiscLimit"/>
- <column name="dpAdminState"/>
- <column name="dpRunState"/>
- </table>
-
<table name="node">
<join column="nodeID" table="ipInterface" table-column="nodeID"/>
<column name="nodeID"/>
- <column name="dpName" visible="false"/>
+ <column name="location"/>
<column name="nodeCreateTime"/>
<column name="nodeParentID"/>
<column name="nodeType"/>
@@ -24,6 +14,10 @@
<column name="nodeSysLocation"/>
<column name="nodeSysContact"/>
<column name="nodeLabel"/>
+ <column name="nodeLabelSource"/>
+ <column name="nodeNetbiosName"/>
+ <column name="nodeDomainName"/>
+ <column name="operatingSystem"/>
<column name="foreignSource"/>
<column name="foreignID"/>
</table>
@@ -57,7 +51,6 @@
<join column="id" table="ipInterface" table-column="snmpinterfaceid"/>
<column name="id" visible="false"/>
<column name="nodeID" visible="false"/>
- <column name="ipAddr" visible="false"/>
<column name="snmpIpAdEntNetMask"/>
<column name="snmpPhysAddr"/>
<column name="snmpIfIndex"/>
@@ -67,7 +60,8 @@
<column name="snmpIfAlias"/>
<column name="snmpIfAdminStatus"/>
<column name="snmpIfOperStatus"/>
- <column name="snmpcollect"/>
+ <column name="snmpCollect"/>
+ <column name="snmpPoll"/>
</table>
<table name="service">
diff --git a/datacollection/fortinet-fortigate-application-v5.2.xml b/datacollection/fortinet-fortigate-application-v5.2.xml
index 2cf392d..af11eba 100644
--- a/datacollection/fortinet-fortigate-application-v5.2.xml
+++ b/datacollection/fortinet-fortigate-application-v5.2.xml
@@ -160,15 +160,15 @@
<mibObj oid=".1.3.6.1.4.1.12356.101.10.111.3.1.1" instance="fgApFTPStatsEntry" alias="fgApFTPReqProcessed" type="Counter32" />
</group>
<group name="fgApFTPConnections" ifType="ignore">
- <mibObj oid=".1.3.6.1.4.1.12356.101.10.111.4" instance="0" alias="fgApFTPConnections" type="Integer32" />
- <mibObj oid=".1.3.6.1.4.1.12356.101.10.111.5" instance="0" alias="fgApFTPMaxConnections" type="Integer32" />
+ <mibObj oid=".1.3.6.1.4.1.12356.101.10.111.4" instance="0" alias="fgApFTPConns" type="Integer32" />
+ <mibObj oid=".1.3.6.1.4.1.12356.101.10.111.5" instance="0" alias="fgApFTPMaxConns" type="Integer32" />
</group>
<group name="fgWebCacheDiskStatsTable" ifType="all">
<mibObj oid=".1.3.6.1.4.1.12356.101.10.113.2.1.1" instance="fgWebChDskStsEntry" alias="fgWebCacheDisk" type="String" />
<mibObj oid=".1.3.6.1.4.1.12356.101.10.113.2.1.2" instance="fgWebChDskStsEntry" alias="fgWebCacheDiskLimit" type="Gauge64" />
<mibObj oid=".1.3.6.1.4.1.12356.101.10.113.2.1.3" instance="fgWebChDskStsEntry" alias="fgWebCacheDiskUsage" type="Gauge64" />
<mibObj oid=".1.3.6.1.4.1.12356.101.10.113.2.1.4" instance="fgWebChDskStsEntry" alias="fgWebCacheDiskHits" type="Counter32" />
- <mibObj oid=".1.3.6.1.4.1.12356.101.10.113.2.1.5" instance="fgWebChDskStsEntry" alias="fgWebCacheDiskMisses" type="Counter32" />
+ <mibObj oid=".1.3.6.1.4.1.12356.101.10.113.2.1.5" instance="fgWebChDskStsEntry" alias="fgWebCacheDiskMiss" type="Counter32" />
</group>
<systemDef name="Fortinet-Fortigate-Application-v5.2">
<sysoidMask>.1.3.6.1.4.1.12356.</sysoidMask>
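
The alias renames in this file (and in netapp.xml below) keep data source names within RRDtool's historical 19-character limit, which the alias must satisfy once the metric is persisted. A worked check of the pair changed above:

    <!-- "fgApFTPMaxConnections" is 21 characters and overflows the 19-character
         RRD data source name limit; the replacement is 15 characters and fits: -->
    <mibObj oid=".1.3.6.1.4.1.12356.101.10.111.5" instance="0" alias="fgApFTPMaxConns" type="Integer32" />
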
diff --git a/datacollection/netapp.xml b/datacollection/netapp.xml
index 53e1c16..6339aaf 100644
--- a/datacollection/netapp.xml
+++ b/datacollection/netapp.xml
@@ -46,8 +46,8 @@
</group>
<group name="netapp-misc" ifType="ignore">
- <mibObj oid=".1.3.6.1.4.1.789.1.2.2.16" instance="0" alias="naMiscLowDiskReadBytes" type="Counter" />
- <mibObj oid=".1.3.6.1.4.1.789.1.2.2.18" instance="0" alias="naMiscLowDiskWriteBytes" type="Counter" />
+ <mibObj oid=".1.3.6.1.4.1.789.1.2.2.16" instance="0" alias="naMscLowDiskRdBytes" type="Counter" />
+ <mibObj oid=".1.3.6.1.4.1.789.1.2.2.18" instance="0" alias="naMscLowDiskWrBytes" type="Counter" />
</group>
<group name="netapp-sis" ifType="all">
diff --git a/datacollection/vmware6.xml b/datacollection/vmware6.xml
new file mode 100644
index 0000000..3c7393a
--- /dev/null
+++ b/datacollection/vmware6.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0"?>
+
+<!--
+Configuration file generated for:
+
+Full name.......: VMware vCenter Server 6.0.0 build-3339084
+API type........: VirtualCenter
+API version.....: 6.0
+Product name....: VMware VirtualCenter Server
+Product version.: 6.0
+OS type.........: linux-x64
+-->
+
+<datacollection-group name="VMware6">
+
+ <resourceType name="vmware6Cpu" label="VMware v6 Cpu" resourceLabel="${vmware6CpuName}">
+ <persistenceSelectorStrategy class="org.opennms.netmgt.collection.support.PersistAllSelectorStrategy"/>
+ <storageStrategy class="org.opennms.netmgt.collection.support.IndexStorageStrategy"/>
+ </resourceType>
+
+ <resourceType name="vmware6DaSt" label="VMware v6 DaSt" resourceLabel="${vmware6DaStName}">
+ <persistenceSelectorStrategy class="org.opennms.netmgt.collection.support.PersistAllSelectorStrategy"/>
+ <storageStrategy class="org.opennms.netmgt.collection.support.IndexStorageStrategy"/>
+ </resourceType>
+
+ <resourceType name="vmware6Disk" label="VMware v6 Disk" resourceLabel="${vmware6DiskName}">
+ <persistenceSelectorStrategy class="org.opennms.netmgt.collection.support.PersistAllSelectorStrategy"/>
+ <storageStrategy class="org.opennms.netmgt.collection.support.IndexStorageStrategy"/>
+ </resourceType>
+
+ <resourceType name="vmware6Net" label="VMware v6 Net" resourceLabel="${vmware6NetName}">
+ <persistenceSelectorStrategy class="org.opennms.netmgt.collection.support.PersistAllSelectorStrategy"/>
+ <storageStrategy class="org.opennms.netmgt.collection.support.IndexStorageStrategy"/>
+ </resourceType>
+
+ <resourceType name="vmware6StAdptr" label="VMware v6 StAdptr" resourceLabel="${vmware6StAdptrName}">
+ <persistenceSelectorStrategy class="org.opennms.netmgt.collection.support.PersistAllSelectorStrategy"/>
+ <storageStrategy class="org.opennms.netmgt.collection.support.IndexStorageStrategy"/>
+ </resourceType>
+
+ <resourceType name="vmware6StPth" label="VMware v6 StPth" resourceLabel="${vmware6StPthName}">
+ <persistenceSelectorStrategy class="org.opennms.netmgt.collection.support.PersistAllSelectorStrategy"/>
+ <storageStrategy class="org.opennms.netmgt.collection.support.IndexStorageStrategy"/>
+ </resourceType>
+
+ <resourceType name="vmware6Sys" label="VMware v6 Sys" resourceLabel="${vmware6SysName}">
+ <persistenceSelectorStrategy class="org.opennms.netmgt.collection.support.PersistAllSelectorStrategy"/>
+ <storageStrategy class="org.opennms.netmgt.collection.support.IndexStorageStrategy"/>
+ </resourceType>
+
+ <resourceType name="vmware6VrtDisk" label="VMware v6 VrtDisk" resourceLabel="${vmware6VrtDiskName}">
+ <persistenceSelectorStrategy class="org.opennms.netmgt.collection.support.PersistAllSelectorStrategy"/>
+ <storageStrategy class="org.opennms.netmgt.collection.support.IndexStorageStrategy"/>
+ </resourceType>
+
+ <resourceType name="vmware6vflashModule" label="VMware v6 vflashModule" resourceLabel="${vmware6vflashModuleName}">
+ <persistenceSelectorStrategy class="org.opennms.netmgt.collection.support.PersistAllSelectorStrategy"/>
+ <storageStrategy class="org.opennms.netmgt.collection.support.IndexStorageStrategy"/>
+ </resourceType>
+
+</datacollection-group>
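
Every resource type in the new VMware 6 group pairs PersistAllSelectorStrategy (persist every collected instance) with IndexStorageStrategy (store each instance under its index). A hypothetical further entry showing the pattern a custom type would follow; vmware6Example is an invented name:

    <resourceType name="vmware6Example" label="VMware v6 Example" resourceLabel="${vmware6ExampleName}">
      <persistenceSelectorStrategy class="org.opennms.netmgt.collection.support.PersistAllSelectorStrategy"/>
      <storageStrategy class="org.opennms.netmgt.collection.support.IndexStorageStrategy"/>
    </resourceType>
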
diff --git a/events/opennms.events.xml b/events/opennms.events.xml
index 565582d..6946cfa 100644
--- a/events/opennms.events.xml
+++ b/events/opennms.events.xml
@@ -914,13 +914,15 @@
&lt;p&gt;This event is generated when node outage processing
determines that the critical path IP address/service for
this node is not responding.&lt;/p&gt;
+ &lt;p&gt;More information on the affected node(s) can be found at
+ the &lt;a href="opennms/pathOutage/index.jsp"&gt;Path Outages&lt;/a&gt; page.&lt;/p&gt;
</descr>
<logmsg dest="logndisplay">
%parm[nodelabel]% path outage. Critical path =
%parm[criticalPathIp]% %parm[criticalPathServiceName]%
</logmsg>
<severity>Major</severity>
- <alarm-data reduction-key="%uei%:%dpname%:%nodeid%" alarm-type="3" auto-clean="false"/>
+ <alarm-data reduction-key="%uei%:%dpname%:%parm[criticalPathIp]%:%parm[criticalPathServiceName]%" alarm-type="3" auto-clean="false"/>
</event>
<event>
<uei>uei.opennms.org/nodes/nodeGainedInterface</uei>
@@ -2078,4 +2080,26 @@
<update-field field-name="severity" update-on-reduction="true"/>
</alarm-data>
</event>
+
+ <event>
+ <uei>uei.opennms.org/internal/monitoringSystemAdded</uei>
+ <event-label>Monitoring System Added</event-label>
+ <descr>A new monitoring system has been added.</descr>
+ <logmsg dest="logndisplay">A new monitoring system of type '%parm[monitoringSystemType]%' has been added with ID '%parm[monitoringSystemId]%' at location '%parm[monitoringSystemLocation]%'.</logmsg>
+ <severity>Normal</severity>
+ </event>
+ <event>
+ <uei>uei.opennms.org/internal/monitoringSystemLocationChanged</uei>
+ <event-label>Monitoring System Location Changed</event-label>
+ <descr>A monitoring system has changed its location.</descr>
+ <logmsg dest="logndisplay">Monitoring system of type '%parm[monitoringSystemType]%' with ID '%parm[monitoringSystemId]%' has changed its location from '%parm[monitoringSystemPreviousLocation]%' to '%parm[monitoringSystemLocation]%'.</logmsg>
+ <severity>Normal</severity>
+ </event>
+ <event>
+ <uei>uei.opennms.org/internal/monitoringSystemDeleted</uei>
+ <event-label>Monitoring System Deleted</event-label>
+ <descr>A monitoring system has been deleted.</descr>
+ <logmsg dest="logndisplay">Monitoring system of type '%parm[monitoringSystemType]%' with ID '%parm[monitoringSystemId]%' at location '%parm[monitoringSystemLocation]%' has been deleted.</logmsg>
+ <severity>Normal</severity>
+ </event>
</events>
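
The three new monitoring-system events expand their %parm[...]% tokens from parameters carried on the incoming event. A sketch of an event instance that would match the first definition; the values are invented, and the XML follows the general shape of an OpenNMS event payload rather than any exact schema:

    <event>
      <uei>uei.opennms.org/internal/monitoringSystemAdded</uei>
      <parms>
        <parm><parmName>monitoringSystemType</parmName><value>Minion</value></parm>
        <parm><parmName>monitoringSystemId</parmName><value>example-system-id</value></parm>
        <parm><parmName>monitoringSystemLocation</parmName><value>Default</value></parm>
      </parms>
    </event>
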
diff --git a/graphml-edge-status/.readme b/graphml-edge-status/.readme
new file mode 100644
index 0000000..51f5fa5
--- /dev/null
+++ b/graphml-edge-status/.readme
@@ -0,0 +1 @@
+Add your custom .groovy files to this folder.
diff --git a/infopanel/.readme b/infopanel/.readme
new file mode 100644
index 0000000..d1e91d8
--- /dev/null
+++ b/infopanel/.readme
@@ -0,0 +1 @@
+Add your custom info panel item files (*.HTML) to this folder.
\ No newline at end of file
diff --git a/jasper-reports.xml b/jasper-reports.xml
index e9d9e62..d7c187a 100644
--- a/jasper-reports.xml
+++ b/jasper-reports.xml
@@ -16,5 +16,6 @@
<report id="AssetMangementMaintExpired" template="AssetManagementMaintExpired.jrxml" engine="jdbc" />
<report id="AssetMangementMaintStrategy" template="AssetManagementMaintStrategy.jrxml" engine="jdbc" />
<report id="Event-Analysis" template="EventAnalysis.jrxml" engine="jdbc" />
- <report id="DiskUsageForCTX" template="DiskUsageForCTX.jrxml" engine="jdbc" />
+ <report id="DiskUsageForCTX" template="DiskUsageForCTX.jrxml" engine="jdbc" />
+ <report id="Top20-IOWait" template="TopIOWait.jrxml" engine="jdbc" />
</jasper-reports>
diff --git a/java.util.logging.properties b/java.util.logging.properties
index 6b21730..b7b50c5 100644
--- a/java.util.logging.properties
+++ b/java.util.logging.properties
@@ -20,3 +20,4 @@
# Empty java.util.logging.properties to prevent logging to stderr, so that
# all logs are delegated to the pax-logging JUL handler only
+
diff --git a/jmx-datacollection-config.d/cassandra30x-newts.xml b/jmx-datacollection-config.d/cassandra30x-newts.xml
new file mode 100644
index 0000000..17255a7
--- /dev/null
+++ b/jmx-datacollection-config.d/cassandra30x-newts.xml
@@ -0,0 +1,118 @@
+<?xml version="1.0"?>
+<jmx-datacollection-config>
+ <jmx-collection name="jmx-cassandra30x-newts">
+ <rrd step="300">
+ <rra>RRA:AVERAGE:0.5:1:2016</rra>
+ <rra>RRA:AVERAGE:0.5:12:1488</rra>
+ <rra>RRA:AVERAGE:0.5:288:366</rra>
+ <rra>RRA:MAX:0.5:288:366</rra>
+ <rra>RRA:MIN:0.5:288:366</rra>
+ </rrd>
+ <mbeans>
+ <!-- Newts :: AllMemtables -->
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=AllMemtablesLiveDataSize">
+ <attrib name="Value" alias="alMemTblLiDaSi" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=AllMemtablesOffHeapDataSize">
+ <attrib name="Value" alias="alMemTblOffHeapDaSi" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=AllMemtablesOnHeapDataSize">
+ <attrib name="Value" alias="alMemTblOnHeapDaSi" type="gauge"/>
+ </mbean>
+
+ <!-- Memtable :: Count -->
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=MemtableSwitchCount">
+ <attrib name="Value" alias="memTblSwitchCount" type="gauge"/>
+ </mbean>
+
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=MemtableColumnsCount">
+ <attrib name="Value" alias="memTblColumnsCnt" type="gauge"/>
+ </mbean>
+
+ <!-- Memtable :: Sizes -->
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=MemtableLiveDataSize">
+ <attrib name="Value" alias="memTblLiveDaSi" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=MemtableOffHeapDataSize">
+ <attrib name="Value" alias="memTblOffHeapDaSi" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=MemtableOnHeapDataSize">
+ <attrib name="Value" alias="memTblOnHeapDaSi" type="gauge"/>
+ </mbean>
+
+ <!-- Latency -->
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=ReadTotalLatency">
+ <attrib name="Count" alias="readTotLtncy" type="counter"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=RangeLatency">
+ <attrib name="99thPercentile" alias="rangeLtncy99" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=WriteTotalLatency">
+ <attrib name="Count" alias="writeTotLtncy" type="counter"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=CasCommitTotalLatency">
+ <attrib name="Count" alias="casCommitTotLtncy" type="counter"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=CasPrepareTotalLatency">
+ <attrib name="Count" alias="casPrepareTotLtncy" type="counter"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=CasProposeTotalLatency">
+ <attrib name="Count" alias="casProposeTotLtncy" type="counter"/>
+ </mbean>
+
+ <!-- Bloom Filter -->
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=BloomFilterDiskSpaceUsed">
+ <attrib name="Value" alias="blmFltrDskSpcUsed" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=BloomFilterOffHeapMemoryUsed">
+ <attrib name="Value" alias="blmFltrOffHeapMemUs" type="gauge"/>
+ </mbean>
+
+ <!-- Memory Used -->
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=CompressionMetadataOffHeapMemoryUsed">
+ <attrib name="Value" alias="cmpMetaOffHeapMemUs" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=IndexSummaryOffHeapMemoryUsed">
+ <attrib name="Value" alias="idxSumOffHeapMemUs" type="gauge"/>
+ </mbean>
+
+ <!-- Pending -->
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=PendingCompactions">
+ <attrib name="Value" alias="pendingCompactions" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=PendingFlushes">
+ <attrib name="Value" alias="pendingFlushes" type="gauge"/>
+ </mbean>
+
+ <!-- Disk Space -->
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=TotalDiskSpaceUsed">
+ <attrib name="Value" alias="totalDiskSpaceUsed" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Keyspace"
+ objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=LiveDiskSpaceUsed">
+ <attrib name="Value" alias="liveDiskSpaceUsed" type="gauge"/>
+ </mbean>
+ </mbeans>
+ </jmx-collection>
+</jmx-datacollection-config>
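
The <rrd> block at the top of this collection (repeated in cassandra30x.xml and jboss.xml below) defines retention relative to the 300-second step. Worked out, the archives keep about a week of raw samples, two months of hourly averages, and a year of daily values:

    <!-- step = 300 s:
         RRA:AVERAGE:0.5:1:2016  -> 1 step   (5 min) x 2016 rows = 7 days
         RRA:AVERAGE:0.5:12:1488 -> 12 steps (1 h)   x 1488 rows = 62 days
         RRA:AVERAGE:0.5:288:366 -> 288 steps (1 d)  x 366 rows  = 366 days
         RRA:MAX / RRA:MIN at 288 steps keep daily extremes for the same 366 days -->
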
diff --git a/jmx-datacollection-config.d/cassandra30x.xml b/jmx-datacollection-config.d/cassandra30x.xml
new file mode 100644
index 0000000..897745b
--- /dev/null
+++ b/jmx-datacollection-config.d/cassandra30x.xml
@@ -0,0 +1,254 @@
+<?xml version="1.0"?>
+<jmx-datacollection-config>
+ <jmx-collection name="jmx-cassandra30x">
+ <rrd step="300">
+ <rra>RRA:AVERAGE:0.5:1:2016</rra>
+ <rra>RRA:AVERAGE:0.5:12:1488</rra>
+ <rra>RRA:AVERAGE:0.5:288:366</rra>
+ <rra>RRA:MAX:0.5:288:366</rra>
+ <rra>RRA:MIN:0.5:288:366</rra>
+ </rrd>
+ <mbeans>
+
+ <!-- Clients -->
+ <mbean name="org.apache.cassandra.metrics.Client"
+ objectname="org.apache.cassandra.metrics:type=Client,name=connectedNativeClients">
+ <attrib name="Value" alias="clntConNativeClnts" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Client"
+ objectname="org.apache.cassandra.metrics:type=Client,name=connectedThriftClients">
+ <attrib name="Value" alias="clntConThriftClnts" type="gauge"/>
+ </mbean>
+
+ <!-- Compaction -->
+ <mbean name="org.apache.cassandra.metrics.Compaction"
+ objectname="org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted">
+ <attrib name="Count" alias="cpctBytesCompacted" type="counter"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Compaction"
+ objectname="org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks">
+ <attrib name="Value" alias="cpctCompletedTasks" type="counter"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Compaction"
+ objectname="org.apache.cassandra.metrics:type=Compaction,name=PendingTasks">
+ <attrib name="Value" alias="cpctPendingTasks" type="gauge"/>
+ </mbean>
+
+ <!-- Storage -->
+ <mbean name="org.apache.cassandra.metrics.Storage"
+ objectname="org.apache.cassandra.metrics:type=Storage,name=Load">
+ <attrib name="Count" alias="strgLoad" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.Storage"
+ objectname="org.apache.cassandra.metrics:type=Storage,name=Exceptions">
+ <attrib name="Count" alias="strgExceptions" type="counter"/>
+ </mbean>
+
+ <!-- Dropped Messages -->
+ <mbean name="org.apache.cassandra.metrics.DroppedMessage"
+ objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=READ,name=Dropped">
+ <attrib name="Count" alias="drpdMsgRead" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.DroppedMessage"
+ objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR,name=Dropped">
+ <attrib name="Count" alias="drpdMsgReadRepair" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.DroppedMessage"
+ objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RESPONSE,name=Dropped">
+ <attrib name="Count" alias="drpdMsgReqResp" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.DroppedMessage"
+ objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_SLICE,name=Dropped">
+ <attrib name="Count" alias="drpdMsgRangeSlice" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.DroppedMessage"
+ objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION,name=Dropped">
+ <attrib name="Count" alias="drpdMsgMutation" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.DroppedMessage"
+ objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=PAGED_RANGE,name=Dropped">
+ <attrib name="Count" alias="drpdMsgPagedRange" type="gauge"/>
+ </mbean>
+
+ <!-- ThreadPools :: MemtableFlushWriter -->
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=ActiveTasks">
+ <attrib name="Value" alias="tpIntMemTblFlsWrAt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=CurrentlyBlockedTasks">
+ <attrib name="Count" alias="tpIntMemTblFlsWrCbt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=PendingTasks">
+ <attrib name="Value" alias="tpIntMemTblFlsWrPt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=CompletedTasks">
+ <attrib name="Count" alias="tpIntMemTblFlsWrCt" type="counter"/>
+ </mbean>
+
+ <!-- ThreadPools :: MemtablePostFlush -->
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=ActiveTasks">
+ <attrib name="Value" alias="tpIntMemTblPoFlsAt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=CurrentlyBlockedTasks">
+ <attrib name="Count" alias="tpIntMemTblPoFlsCbt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=PendingTasks">
+ <attrib name="Value" alias="tpIntMemTblPoFlsPt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=CompletedTasks">
+ <attrib name="Count" alias="tpIntMemTblPoFlsCt" type="counter"/>
+ </mbean>
+
+ <!-- ThreadPools :: AntiEntropyStage -->
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks">
+ <attrib name="Value" alias="tpIntAntiEntStgeAt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=CurrentlyBlockedTasks">
+ <attrib name="Count" alias="tpIntAntiEntStgeCbt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=PendingTasks">
+ <attrib name="Value" alias="tpIntAntiEntStgePt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=CompletedTasks">
+ <attrib name="Value" alias="tpIntAntiEntStgeCt" type="counter"/>
+ </mbean>
+
+ <!-- ThreadPools :: GossipStage -->
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=ActiveTasks">
+ <attrib name="Value" alias="tpIntGosStgeAt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=CurrentlyBlockedTasks">
+ <attrib name="Count" alias="tpIntGosStgeCbt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=PendingTasks">
+ <attrib name="Value" alias="tpIntGosStgePt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=CompletedTasks">
+ <attrib name="Value" alias="tpIntGosStgeCt" type="counter"/>
+ </mbean>
+
+ <!-- ThreadPools :: MigrationStage -->
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=ActiveTasks">
+ <attrib name="Value" alias="tpIntMigStgeAt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=CurrentlyBlockedTasks">
+ <attrib name="Count" alias="tpIntMigStgeCbt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=PendingTasks">
+ <attrib name="Value" alias="tpIntMigStgePt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=CompletedTasks">
+ <attrib name="Value" alias="tpIntMigStgeCt" type="counter"/>
+ </mbean>
+
+ <!-- ThreadPools :: MiscStage -->
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MiscStage,name=ActiveTasks">
+ <attrib name="Value" alias="tpIntMiscStgeAt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MiscStage,name=CurrentlyBlockedTasks">
+ <attrib name="Count" alias="tpIntMiscStgeCbt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MiscStage,name=PendingTasks">
+ <attrib name="Value" alias="tpIntMiscStgePt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MiscStage,name=CompletedTasks">
+ <attrib name="Value" alias="tpIntMiscStgeCt" type="counter"/>
+ </mbean>
+
+ <!-- ThreadPools :: MutationStage -->
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=ActiveTasks">
+ <attrib name="Value" alias="tpMutStgeAt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks">
+ <attrib name="Value" alias="tpMutStgeCbt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks">
+ <attrib name="Value" alias="tpMutStgePt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CompletedTasks">
+ <attrib name="Value" alias="tpMutStgeCt" type="counter"/>
+ </mbean>
+
+ <!-- ThreadPools :: ReadStage -->
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=ActiveTasks">
+ <attrib name="Value" alias="tpReadStageAt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks">
+ <attrib name="Value" alias="tpReadStageCbt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks">
+ <attrib name="Value" alias="tpReadStagePt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CompletedTasks">
+ <attrib name="Value" alias="tpReadStageCt" type="counter"/>
+ </mbean>
+
+ <!-- ThreadPools :: RequestResponseStage -->
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=ActiveTasks">
+ <attrib name="Value" alias="tpReqRespStgeAt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks">
+ <attrib name="Value" alias="tpReqRespStgeCbt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks">
+ <attrib name="Value" alias="tpReqRespStgePt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CompletedTasks">
+ <attrib name="Value" alias="tpReqRespStgeCt" type="counter"/>
+ </mbean>
+
+ <!-- ThreadPools :: ReadRepairStage -->
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=ActiveTasks">
+ <attrib name="Value" alias="tpReadRepairStgeAt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks">
+ <attrib name="Count" alias="tpReadRepairStgeCbt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks">
+ <attrib name="Value" alias="tpReadRepairStgePt" type="gauge"/>
+ </mbean>
+ <mbean name="org.apache.cassandra.metrics.ThreadPools"
+ objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CompletedTasks">
+ <attrib name="Value" alias="tpReadRepairStgeCt" type="counter"/>
+ </mbean>
+ </mbeans>
+ </jmx-collection>
+</jmx-datacollection-config>
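Note the pattern in the ThreadPools section above: each pool is collected through four mbean entries (ActiveTasks, CurrentlyBlockedTasks, PendingTasks, CompletedTasks), with CompletedTasks stored as a counter and the rest as gauges. A further pool could be added the same way; the sketch below uses Cassandra's CompactionExecutor pool as an example, and the alias is illustrative rather than part of the shipped collection. On RRD-backed installs, aliases must also stay unique and within the 19-character data-source limit:

      <mbean name="org.apache.cassandra.metrics.ThreadPools"
             objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks">
        <attrib name="Value" alias="tpIntCmpExecAt" type="gauge"/>
      </mbean>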
diff --git a/jmx-datacollection-config.d/jboss.xml b/jmx-datacollection-config.d/jboss.xml
new file mode 100644
index 0000000..3595bde
--- /dev/null
+++ b/jmx-datacollection-config.d/jboss.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0"?>
+<jmx-datacollection-config>
+ <jmx-collection name="jmx-jboss">
+ <rrd step="300">
+ <rra>RRA:AVERAGE:0.5:1:2016</rra>
+ <rra>RRA:AVERAGE:0.5:12:1488</rra>
+ <rra>RRA:AVERAGE:0.5:288:366</rra>
+ <rra>RRA:MAX:0.5:288:366</rra>
+ <rra>RRA:MIN:0.5:288:366</rra>
+ </rrd>
+ <mbeans>
+ <mbean name="SystemInfo" objectname="jboss.system:type=ServerInfo">
+ <attrib name="FreeMemory" alias="FreeMemory" type="gauge"/>
+ <attrib name="TotalMemory" alias="TotalMemory" type="gauge"/>
+ </mbean>
+ <mbean name="DefaultDSManagedConnectionPool"
+ objectname="jboss.jca:service=ManagedConnectionPool,name=DefaultDS">
+ <attrib name="AvailableConnectionCount" alias="DefDS_AvailConns" type="gauge"/>
+ <attrib name="ConnectionCount" alias="DefDS_Conns" type="gauge"/>
+ <attrib name="InUseConnectionCount" alias="DefDS_InUseConns" type="gauge"/>
+ <attrib name="ConnectionCreatedCount" alias="DefDS_CreatedConns" type="counter"/>
+ <attrib name="ConnectionDestroyedCount" alias="DefDS_DestroyConns" type="counter"/>
+ </mbean>
+ <!-- JmsXA Connector Pool
+ <mbean name="JmsXAManagedConnectionPool" objectname="jboss.jca:service=ManagedConnectionPool,name=JmsXA">
+ <attrib name="AvailableConnectionCount" alias="JmsXA_AvailConns" type="gauge"/>
+ <attrib name="ConnectionCount" alias="JmsXA_Conns" type="gauge"/>
+ <attrib name="InUseConnectionCount" alias="JmsXA_InUseConns" type="gauge"/>
+ <attrib name="ConnectionCreatedCount" alias="JmsXA_CreatedConns" type="gauge"/>
+ <attrib name="ConnectionDestroyedCount" alias="JmsXA_DestroyConns" type="gauge"/>
+ </mbean-->
+ <!-- Destination Manager
+ <mbean name="MQ_DestinationManager" objectname="jboss.mq:service=DestinationManager">
+ <attrib name="ClientCount" alias="MQ_ClientCount" type="gauge"/>
+ </mbean-->
+ <!-- an example of a Queue
+ <mbean name="MQ_DestinationQueueA" objectname="jboss.mq.destination:service=Queue,name=A">
+ <attrib name="QueueDepth" alias="A_QueueDepth" type="gauge"/>
+ <attrib name="ReceiversCount" alias="A_QueueRecv" type="gauge"/>
+ <attrib name="MaxDepth" alias="A_QueueMaxDepth" type="gauge"/>
+ </mbean-->
+ <!-- an example of a Topic
+ <mbean name="SecuredTopic" objectname="jboss.mq.destination:service=Topic,name=securedTopic">
+ <attrib name="DurableMessageCount" alias="ST_DurMsg" type="gauge"/>
+ <attrib name="NonDurableMessageCount" alias="ST_NonDurMsg" type="gauge"/>
+ <attrib name="NonDurableSubscriptionCount" alias="ST_NonDurSub" type="gauge"/>
+ <attrib name="DurableSubscriptionCount" alias="ST_DurSub" type="gauge"/>
+ <attrib name="AllMessageCount" alias="ST_AllMsg" type="gauge"/>
+ <attrib name="MaxDepth" alias="ST_MaxDepth" type="gauge"/>
+ <attrib name="AllSubscriptionsCount" alias="ST_AllSub" type="gauge"/>
+ </mbean-->
+ <!-- Global Request Processor -->
+ <mbean name="GlobalRequestProcessor"
+ objectname="jboss.web:type=GlobalRequestProcessor,name=http-0.0.0.0-8080">
+ <attrib name="requestCount" alias="GRP_requests" type="counter"/>
+ <attrib name="maxTime" alias="GRP_maxTime" type="gauge"/>
+ <attrib name="bytesSent" alias="GRP_bytesSent" type="counter"/>
+ <attrib name="bytesReceived" alias="GRP_bytesRec" type="counter"/>
+ <attrib name="processingTime" alias="GRP_procTime" type="counter"/>
+ <attrib name="errorCount" alias="GRP_errors" type="counter"/>
+ </mbean>
+ <!-- Thread Pool -->
+ <mbean name="ThreadPool" objectname="jboss.web:type=ThreadPool,name=http-0.0.0.0-8080">
+ <attrib name="currentThreadsBusy" alias="BusyThreads" type="gauge"/>
+ <attrib name="currentThreadCount" alias="Threads" type="gauge"/>
+ <attrib name="minSpareThreads" alias="MinSpareThreads" type="gauge"/>
+ <attrib name="maxSpareThreads" alias="MaxSpareThreads" type="gauge"/>
+ <attrib name="maxThreads" alias="MaxThreads" type="gauge"/>
+ </mbean>
+ </mbeans>
+ </jmx-collection>
+</jmx-datacollection-config>
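One caveat on the two jboss.web entries above: the objectname embeds the connector's bind address and port (http-0.0.0.0-8080), so the mbean only resolves on a JBoss instance whose HTTP connector actually listens there. A connector on another port needs a matching objectname; for example (8443 is hypothetical):

      <mbean name="GlobalRequestProcessor"
             objectname="jboss.web:type=GlobalRequestProcessor,name=http-0.0.0.0-8443">
        <attrib name="requestCount" alias="GRP_requests" type="counter"/>
      </mbean>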
diff --git a/jmx-datacollection-config.d/kafka.xml b/jmx-datacollection-config.d/kafka.xml
new file mode 100644
index 0000000..38d91e6
--- /dev/null
+++ b/jmx-datacollection-config.d/kafka.xml
@@ -0,0 +1,240 @@
+<?xml version="1.0"?>
+<jmx-datacollection-config rrdRepository="/opt/opennms/share/rrd/snmp/">
+ <jmx-collection name="jmx-kafka">
+ <rrd step="300">
+ <rra>RRA:AVERAGE:0.5:1:2016</rra>
+ <rra>RRA:AVERAGE:0.5:12:1488</rra>
+ <rra>RRA:AVERAGE:0.5:288:366</rra>
+ <rra>RRA:MAX:0.5:288:366</rra>
+ <rra>RRA:MIN:0.5:288:366</rra>
+ </rrd>
+ <mbeans>
+
+ <mbean name="JVM Memory" objectname="java.lang:type=OperatingSystem">
+ <attrib name="FreePhysicalMemorySize" alias="FreeMemory" type="gauge"/>
+ <attrib name="TotalPhysicalMemorySize" alias="TotalMemory" type="gauge"/>
+ <attrib name="FreeSwapSpaceSize" alias="FreeSwapSpace" type="gauge"/>
+ <attrib name="TotalSwapSpaceSize" alias="TotalSwapSpace" type="gauge"/>
+ </mbean>
+ <mbean name="JVM Threading" objectname="java.lang:type=Threading">
+ <attrib name="ThreadCount" alias="ThreadCount" type="gauge"/>
+ <attrib name="PeakThreadCount" alias="PeakThreadCount" type="gauge"/>
+ <attrib name="DaemonThreadCount" alias="DaemonThreadCount" type="gauge"/>
+ <attrib name="CurrentThreadCpuTime" alias="CurThreadCpuTime" type="gauge"/>
+ </mbean>
+ <mbean name="JVM GarbageCollector:MarkSweepCompact" objectname="java.lang:type=GarbageCollector,name=MarkSweepCompact">
+ <attrib name="CollectionCount" alias="MSCCollCnt" type="counter"/>
+ <attrib name="CollectionTime" alias="MSCCollTime" type="counter"/>
+ <comp-attrib name="LastGcInfo" type="Composite" alias="MSCLastGcInfo">
+ <comp-member name="GcThreadCount" type="gauge" alias="MSCGcThreadCnt"/>
+ <comp-member name="duration" type="gauge" alias="MSCDuration"/>
+ <comp-member name="endTime" type="gauge" alias="MSCEndTime"/>
+ </comp-attrib>
+ </mbean>
+
+
+ <!-- ================== -->
+ <!-- BrokerTopicMetrics -->
+ <!-- ================== -->
+ <!-- https://kafka.apache.org/090/ops.html -->
+ <!-- https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/ -->
+
+ <mbean name="Messages In Per Second" objectname="kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec">
+ <attrib name="Count" alias="msgInPerSec" type="gauge"/>
+ </mbean>
+
+ <mbean name="Bytes In Per Second" objectname="kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec">
+ <attrib name="Count" alias="bytesInPerSec" type="gauge"/>
+ </mbean>
+
+ <mbean name="Bytes Out Per Second" objectname="kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec">
+ <attrib name="Count" alias="bytesOutPerSec" type="gauge"/>
+ </mbean>
+
+
+ <!-- ============== -->
+ <!-- ReplicaManager -->
+ <!-- ============== -->
+ <!-- https://kafka.apache.org/090/ops.html -->
+
+ <!-- https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/ -->
+ <mbean name="Under-Replicated Partitions" objectname="kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions">
+ <attrib name="Value" alias="underReplPart" type="gauge"/>
+ </mbean>
+
+ <mbean name="Partitions" objectname="kafka.server:type=ReplicaManager,name=PartitionCount">
+ <attrib name="Value" alias="partitions" type="gauge"/>
+ </mbean>
+
+ <mbean name="Leaders" objectname="kafka.server:type=ReplicaManager,name=LeaderCount">
+ <attrib name="Value" alias="leaders" type="gauge"/>
+ </mbean>
+
+ <!-- https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/ -->
+ <mbean name="In-Sync Replica Shrinks Per Second" objectname="kafka.server:type=ReplicaManager,name=IsrShrinksPerSec">
+ <attrib name="Count" alias="isrShrinksPerSec" type="gauge"/>
+ </mbean>
+
+ <!-- https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/ -->
+ <mbean name="In-Sync Replica Expands Per Second" objectname="kafka.server:type=ReplicaManager,name=IsrExpandsPerSec">
+ <attrib name="Count" alias="isrExpandsPerSec" type="gauge"/>
+ </mbean>
+
+
+ <!-- ===================== -->
+ <!-- ReplicaFetcherManager -->
+ <!-- ===================== -->
+ <!-- https://kafka.apache.org/090/ops.html -->
+
+ <mbean name="Maximum Lag Between Replicas" objectname="kafka.server:type=ReplicaFetcherManager,name=MaxLag,clientId=Replica">
+ <attrib name="Value" alias="replicaMaxLag" type="gauge"/>
+ </mbean>
+
+
+ <!-- =============== -->
+ <!-- KafkaController -->
+ <!-- =============== -->
+ <!-- https://kafka.apache.org/090/ops.html -->
+ <!-- https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/ -->
+
+ <mbean name="Active Controllers" objectname="kafka.controller:type=KafkaController,name=ActiveControllerCount">
+ <attrib name="Value" alias="activeControllers" type="gauge"/>
+ </mbean>
+
+ <mbean name="Offline Partitions" objectname="kafka.controller:type=KafkaController,name=OfflinePartitionsCount">
+ <attrib name="Value" alias="offlinePartitions" type="gauge"/>
+ </mbean>
+
+
+ <!-- =============== -->
+ <!-- ControllerStats -->
+ <!-- =============== -->
+ <!-- https://kafka.apache.org/090/ops.html -->
+ <!-- https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/ -->
+
+ <mbean name="Leader Election Rate and Time (ms)" objectname="kafka.controller:type=ControllerStats,name=LeaderElectionRateAndTimeMs">
+ <attrib name="Count" alias="leaderEps" type="gauge"/>
+ </mbean>
+
+ <mbean name="Unclean Leader Elections Per Second" objectname="kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec">
+ <attrib name="Count" alias="uncleanLeaderEps" type="gauge"/>
+ </mbean>
+
+
+ <!-- ========================= -->
+ <!-- DelayedOperationPurgatory -->
+ <!-- ========================= -->
+ <!-- https://kafka.apache.org/090/ops.html -->
+ <!-- https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/ -->
+
+ <mbean name="Purgatory Size: Produce" objectname="kafka.server:type=DelayedOperationPurgatory,name=PurgatorySize,delayedOperation=Produce">
+ <attrib name="Value" alias="purgatoryProduce" type="gauge"/>
+ </mbean>
+
+ <mbean name="Purgatory Size: Fetch" objectname="kafka.server:type=DelayedOperationPurgatory,name=PurgatorySize,delayedOperation=Fetch">
+ <attrib name="Value" alias="purgatoryFetch" type="gauge"/>
+ </mbean>
+
+
+ <!-- ============ -->
+ <!-- SocketServer -->
+ <!-- ============ -->
+ <!-- https://kafka.apache.org/090/ops.html -->
+
+ <mbean name="Network Processor Average Idle Percentage" objectname="kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent">
+ <attrib name="Value" alias="netProcAvgIdle" type="gauge"/>
+ </mbean>
+
+
+ <!-- ======================= -->
+ <!-- KafkaRequestHandlerPool -->
+ <!-- ======================= -->
+ <!-- https://kafka.apache.org/090/ops.html -->
+
+ <mbean name="Request Handler Average Idle Percentage" objectname="kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent">
+ <attrib name="Value" alias="reqHandAvgIdle" type="gauge"/>
+ </mbean>
+
+
+ <!-- ============== -->
+ <!-- RequestMetrics -->
+ <!-- ============== -->
+ <!-- https://kafka.apache.org/090/ops.html -->
+ <!-- https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/ -->
+
+ <mbean name="Requests Per Second: Produce" objectname="kafka.network:type=RequestMetrics,name=RequestsPerSec,request=Produce">
+ <attrib name="Count" alias="reqSecProduce" type="gauge"/>
+ </mbean>
+
+ <mbean name="Requests Per Second: FetchConsumer" objectname="kafka.network:type=RequestMetrics,name=RequestsPerSec,request=FetchConsumer">
+ <attrib name="Count" alias="reqSecConsumer" type="gauge"/>
+ </mbean>
+
+ <mbean name="Requests Per Second: FetchFollower" objectname="kafka.network:type=RequestMetrics,name=RequestsPerSec,request=FetchFollower">
+ <attrib name="Count" alias="reqSecFollower" type="gauge"/>
+ </mbean>
+
+ <mbean name="Total Time: Produce" objectname="kafka.network:type=RequestMetrics,name=TotalTimeMs,request=Produce">
+ <attrib name="Count" alias="totalTimeProduce" type="gauge"/>
+ </mbean>
+
+ <mbean name="Total Time: FetchConsumer" objectname="kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchConsumer">
+ <attrib name="Count" alias="totalTimeConsumer" type="gauge"/>
+ </mbean>
+
+ <mbean name="Total Time: FetchFollower" objectname="kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchFollower">
+ <attrib name="Count" alias="totalTimeFollower" type="gauge"/>
+ </mbean>
+
+ <mbean name="Queue Time: Produce" objectname="kafka.network:type=RequestMetrics,name=RequestQueueTimeMs,request=Produce">
+ <attrib name="Count" alias="queueTimeProduce" type="gauge"/>
+ </mbean>
+
+ <mbean name="Queue Time: FetchConsumer" objectname="kafka.network:type=RequestMetrics,name=RequestQueueTimeMs,request=FetchConsumer">
+ <attrib name="Count" alias="queueTimeConsumer" type="gauge"/>
+ </mbean>
+
+ <mbean name="Queue Time: FetchFollower" objectname="kafka.network:type=RequestMetrics,name=RequestQueueTimeMs,request=FetchFollower">
+ <attrib name="Count" alias="queueTimeFollower" type="gauge"/>
+ </mbean>
+
+ <mbean name="Local Time: Produce" objectname="kafka.network:type=RequestMetrics,name=LocalTimeMs,request=Produce">
+ <attrib name="Count" alias="localTimeProduce" type="gauge"/>
+ </mbean>
+
+ <mbean name="Local Time: FetchConsumer" objectname="kafka.network:type=RequestMetrics,name=LocalTimeMs,request=FetchConsumer">
+ <attrib name="Count" alias="localTimeConsumer" type="gauge"/>
+ </mbean>
+
+ <mbean name="Local Time: FetchFollower" objectname="kafka.network:type=RequestMetrics,name=LocalTimeMs,request=FetchFollower">
+ <attrib name="Count" alias="localTimeFollower" type="gauge"/>
+ </mbean>
+
+ <mbean name="Remote Time: Produce" objectname="kafka.network:type=RequestMetrics,name=RemoteTimeMs,request=Produce">
+ <attrib name="Count" alias="remoteTimeProduce" type="gauge"/>
+ </mbean>
+
+ <mbean name="Remote Time: FetchConsumer" objectname="kafka.network:type=RequestMetrics,name=RemoteTimeMs,request=FetchConsumer">
+ <attrib name="Count" alias="remoteTimeConsumer" type="gauge"/>
+ </mbean>
+
+ <mbean name="Remote Time: FetchFollower" objectname="kafka.network:type=RequestMetrics,name=RemoteTimeMs,request=FetchFollower">
+ <attrib name="Count" alias="remoteTimeFollower" type="gauge"/>
+ </mbean>
+
+ <mbean name="Response Send Time: Produce" objectname="kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=Produce">
+ <attrib name="Count" alias="sendTimeProduce" type="gauge"/>
+ </mbean>
+
+ <mbean name="Response Send Time: FetchConsumer" objectname="kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchConsumer">
+ <attrib name="Count" alias="sendTimeConsumer" type="gauge"/>
+ </mbean>
+
+ <mbean name="Response Send Time: FetchFollower" objectname="kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchFollower">
+ <attrib name="Count" alias="sendTimeFollower" type="gauge"/>
+ </mbean>
+
+ </mbeans>
+
+ </jmx-collection>
+</jmx-datacollection-config>
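The BrokerTopicMetrics entries above are broker-wide aggregates. Kafka registers the same meters per topic with an additional topic key property, so a per-topic variant is a straightforward extension; in the sketch below the topic name "orders" and the alias are hypothetical. As in the shipped file, the meter's cumulative Count attribute is what gets collected:

      <mbean name="Messages In Per Second (orders)" objectname="kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=orders">
        <attrib name="Count" alias="msgInPerSecOrders" type="gauge"/>
      </mbean>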
diff --git a/jmx-datacollection-config.d/minion.xml b/jmx-datacollection-config.d/minion.xml
new file mode 100644
index 0000000..e8f0f88
--- /dev/null
+++ b/jmx-datacollection-config.d/minion.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0"?>
+<jmx-datacollection-config rrdRepository="/opt/opennms/share/rrd/snmp/">
+ <jmx-collection name="jmx-minion">
+ <rrd step="300">
+ <rra>RRA:AVERAGE:0.5:1:2016</rra>
+ <rra>RRA:AVERAGE:0.5:12:1488</rra>
+ <rra>RRA:AVERAGE:0.5:288:366</rra>
+ <rra>RRA:MAX:0.5:288:366</rra>
+ <rra>RRA:MIN:0.5:288:366</rra>
+ </rrd>
+ <mbeans>
+ <mbean name="JVM Memory" objectname="java.lang:type=OperatingSystem">
+ <attrib name="FreePhysicalMemorySize" alias="FreeMemory" type="gauge"/>
+ <attrib name="TotalPhysicalMemorySize" alias="TotalMemory" type="gauge"/>
+ <attrib name="FreeSwapSpaceSize" alias="FreeSwapSpace" type="gauge"/>
+ <attrib name="TotalSwapSpaceSize" alias="TotalSwapSpace" type="gauge"/>
+ </mbean>
+ <mbean name="JVM Threading" objectname="java.lang:type=Threading">
+ <attrib name="ThreadCount" alias="ThreadCount" type="gauge"/>
+ <attrib name="PeakThreadCount" alias="PeakThreadCount" type="gauge"/>
+ <attrib name="DaemonThreadCount" alias="DaemonThreadCount" type="gauge"/>
+ <attrib name="CurrentThreadCpuTime" alias="CurThreadCpuTime" type="gauge"/>
+ </mbean>
+ <mbean name="JVM GarbageCollector:MarkSweepCompact" objectname="java.lang:type=GarbageCollector,name=MarkSweepCompact">
+ <attrib name="CollectionCount" alias="MSCCollCnt" type="counter"/>
+ <attrib name="CollectionTime" alias="MSCCollTime" type="counter"/>
+ <comp-attrib name="LastGcInfo" type="Composite" alias="MSCLastGcInfo">
+ <comp-member name="GcThreadCount" type="gauge" alias="MSCGcThreadCnt"/>
+ <comp-member name="duration" type="gauge" alias="MSCDuration"/>
+ <comp-member name="endTime" type="gauge" alias="MSCEndTime"/>
+ </comp-attrib>
+ </mbean>
+
+
+ <!-- Route stats for syslogListen -->
+ <mbean name="Syslog Listener" objectname="org.apache.camel:context=syslogdListenerCamelNettyContext,type=routes,name=&quot;syslogListen&quot;">
+ <attrib name="ExchangesCompleted" alias="SlogListComplete" type="counter"/>
+ <attrib name="ExchangesFailed" alias="SlogListFailed" type="counter"/>
+ <attrib name="ExchangesTotal" alias="SlogListTotal" type="counter"/>
+ <attrib name="MaxProcessingTime" alias="SlogListMaxProc" type="gauge"/>
+ <attrib name="MeanProcessingTime" alias="SlogListMeanProc" type="gauge"/>
+ <attrib name="MinProcessingTime" alias="SlogListMinProc" type="gauge"/>
+ <attrib name="LastProcessingTime" alias="SlogListLastProc" type="gauge"/>
+ <attrib name="TotalProcessingTime" alias="SlogListTotProc" type="counter"/>
+ </mbean>
+
+
+ <!-- Route stats for RPC.Server.Detect -->
+ <mbean name="Provisioning Detectors RPC Server" objectname="org.apache.camel:context=org.opennms.core.ipc.rpc.camel-impl,type=routes,name=&quot;RPC.Server.Detect&quot;">
+ <attrib name="ExchangesCompleted" alias="DetectComplete" type="counter"/>
+ <attrib name="ExchangesFailed" alias="DetectFailed" type="counter"/>
+ <attrib name="ExchangesTotal" alias="DetectTotal" type="counter"/>
+ <attrib name="MaxProcessingTime" alias="DetectMaxProc" type="gauge"/>
+ <attrib name="MeanProcessingTime" alias="DetectMeanProc" type="gauge"/>
+ <attrib name="MinProcessingTime" alias="DetectMinProc" type="gauge"/>
+ <attrib name="LastProcessingTime" alias="DetectLastProc" type="gauge"/>
+ <attrib name="TotalProcessingTime" alias="DetectTotProc" type="counter"/>
+ </mbean>
+
+
+ <!-- Route stats for RPC.Server.DNS -->
+ <mbean name="DNS RPC Server" objectname="org.apache.camel:context=org.opennms.core.ipc.rpc.camel-impl,type=routes,name=&quot;RPC.Server.DNS&quot;">
+ <attrib name="ExchangesCompleted" alias="DnsComplete" type="counter"/>
+ <attrib name="ExchangesFailed" alias="DnsFailed" type="counter"/>
+ <attrib name="ExchangesTotal" alias="DnsTotal" type="counter"/>
+ <attrib name="MaxProcessingTime" alias="DnsMaxProc" type="gauge"/>
+ <attrib name="MeanProcessingTime" alias="DnsMeanProc" type="gauge"/>
+ <attrib name="MinProcessingTime" alias="DnsMinProc" type="gauge"/>
+ <attrib name="LastProcessingTime" alias="DnsLastProc" type="gauge"/>
+ <attrib name="TotalProcessingTime" alias="DnsTotProc" type="counter"/>
+ </mbean>
+
+
+ <!-- Route stats for RPC.Server.PING -->
+ <mbean name="Ping RPC Server" objectname="org.apache.camel:context=org.opennms.core.ipc.rpc.camel-impl,type=routes,name=&quot;RPC.Server.PING&quot;">
+ <attrib name="ExchangesCompleted" alias="PingComplete" type="counter"/>
+ <attrib name="ExchangesFailed" alias="PingFailed" type="counter"/>
+ <attrib name="ExchangesTotal" alias="PingTotal" type="counter"/>
+ <attrib name="MaxProcessingTime" alias="PingMaxProc" type="gauge"/>
+ <attrib name="MeanProcessingTime" alias="PingMeanProc" type="gauge"/>
+ <attrib name="MinProcessingTime" alias="PingMinProc" type="gauge"/>
+ <attrib name="LastProcessingTime" alias="PingLastProc" type="gauge"/>
+ <attrib name="TotalProcessingTime" alias="PingTotProc" type="counter"/>
+ </mbean>
+
+
+ <!-- Route stats for RPC.Server.PING-SWEEP -->
+ <mbean name="Ping Sweep RPC Server" objectname="org.apache.camel:context=org.opennms.core.ipc.rpc.camel-impl,type=routes,name=&quot;RPC.Server.PING-SWEEP&quot;">
+ <attrib name="ExchangesCompleted" alias="SweepComplete" type="counter"/>
+ <attrib name="ExchangesFailed" alias="SweepFailed" type="counter"/>
+ <attrib name="ExchangesTotal" alias="SweepTotal" type="counter"/>
+ <attrib name="MaxProcessingTime" alias="SweepMaxProc" type="gauge"/>
+ <attrib name="MeanProcessingTime" alias="SweepMeanProc" type="gauge"/>
+ <attrib name="MinProcessingTime" alias="SweepMinProc" type="gauge"/>
+ <attrib name="LastProcessingTime" alias="SweepLastProc" type="gauge"/>
+ <attrib name="TotalProcessingTime" alias="SweepTotProc" type="counter"/>
+ </mbean>
+
+
+ <!-- Route stats for RPC.Server.Poller -->
+ <mbean name="Poller RPC Server" objectname="org.apache.camel:context=org.opennms.core.ipc.rpc.camel-impl,type=routes,name=&quot;RPC.Server.Poller&quot;">
+ <attrib name="ExchangesCompleted" alias="PollComplete" type="counter"/>
+ <attrib name="ExchangesFailed" alias="PollFailed" type="counter"/>
+ <attrib name="ExchangesTotal" alias="PollTotal" type="counter"/>
+ <attrib name="MaxProcessingTime" alias="PollMaxProc" type="gauge"/>
+ <attrib name="MeanProcessingTime" alias="PollMeanProc" type="gauge"/>
+ <attrib name="MinProcessingTime" alias="PollMinProc" type="gauge"/>
+ <attrib name="LastProcessingTime" alias="PollLastProc" type="gauge"/>
+ <attrib name="TotalProcessingTime" alias="PollTotProc" type="counter"/>
+ </mbean>
+
+
+ <!-- Route stats for RPC.Server.SNMP -->
+ <mbean name="SNMP RPC Server" objectname="org.apache.camel:context=org.opennms.core.ipc.rpc.camel-impl,type=routes,name=&quot;RPC.Server.SNMP&quot;">
+ <attrib name="ExchangesCompleted" alias="SnmpComplete" type="counter"/>
+ <attrib name="ExchangesFailed" alias="SnmpFailed" type="counter"/>
+ <attrib name="ExchangesTotal" alias="SnmpTotal" type="counter"/>
+ <attrib name="MaxProcessingTime" alias="SnmpMaxProc" type="gauge"/>
+ <attrib name="MeanProcessingTime" alias="SnmpMeanProc" type="gauge"/>
+ <attrib name="MinProcessingTime" alias="SnmpMinProc" type="gauge"/>
+ <attrib name="LastProcessingTime" alias="SnmpLastProc" type="gauge"/>
+ <attrib name="TotalProcessingTime" alias="SnmpTotProc" type="counter"/>
+ </mbean>
+
+
+ <!--
+ Example of SEDA endpoint collection that won't work well because the objectname is based on the URI which might change
+
+ <mbean name="Syslog Marshaller Queue" objectname="org.apache.camel:context=syslogdHandlerMinionContext,type=endpoints,name=&quot;seda://handleMessage?concurrentConsumers=4&quot;">
+ <attrib name="CurrentQueueSize" alias="SlogMarQueue" type="gauge"/>
+ </mbean>
+ -->
+ </mbeans>
+ </jmx-collection>
+</jmx-datacollection-config>
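The three new files above (jboss.xml, kafka.xml, minion.xml) show the 19.0.0 convention of shipping application-specific collections as standalone documents under jmx-datacollection-config.d/ rather than growing the monolithic jmx-datacollection-config.xml. A site-local collection can follow the same layout; everything in this skeleton (file name, collection name, mbean, alias) is hypothetical:

    <?xml version="1.0"?>
    <jmx-datacollection-config rrdRepository="/opt/opennms/share/rrd/snmp/">
      <jmx-collection name="jmx-myapp">
        <rrd step="300">
          <rra>RRA:AVERAGE:0.5:1:2016</rra>
        </rrd>
        <mbeans>
          <mbean name="MyApp Requests" objectname="com.example:type=RequestStats">
            <attrib name="RequestCount" alias="myAppRequests" type="counter"/>
          </mbean>
        </mbeans>
      </jmx-collection>
    </jmx-datacollection-config>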
diff --git a/jmx-datacollection-config.xml b/jmx-datacollection-config.xml
index b49c44b..44c0afc 100644
--- a/jmx-datacollection-config.xml
+++ b/jmx-datacollection-config.xml
@@ -1,74 +1,5 @@
<?xml version="1.0"?>
<jmx-datacollection-config rrdRepository="/opt/opennms/share/rrd/snmp/">
- <jmx-collection name="jboss">
- <rrd step="300">
- <rra>RRA:AVERAGE:0.5:1:2016</rra>
- <rra>RRA:AVERAGE:0.5:12:1488</rra>
- <rra>RRA:AVERAGE:0.5:288:366</rra>
- <rra>RRA:MAX:0.5:288:366</rra>
- <rra>RRA:MIN:0.5:288:366</rra>
- </rrd>
- <mbeans>
- <mbean name="SystemInfo" objectname="jboss.system:type=ServerInfo">
- <attrib name="FreeMemory" alias="FreeMemory" type="gauge"/>
- <attrib name="TotalMemory" alias="TotalMemory" type="gauge"/>
- </mbean>
- <mbean name="DefaultDSManagedConnectionPool"
- objectname="jboss.jca:service=ManagedConnectionPool,name=DefaultDS">
- <attrib name="AvailableConnectionCount" alias="DefDS_AvailConns" type="gauge"/>
- <attrib name="ConnectionCount" alias="DefDS_Conns" type="gauge"/>
- <attrib name="InUseConnectionCount" alias="DefDS_InUseConns" type="gauge"/>
- <attrib name="ConnectionCreatedCount" alias="DefDS_CreatedConns" type="counter"/>
- <attrib name="ConnectionDestroyedCount" alias="DefDS_DestroyConns" type="counter"/>
- </mbean>
- <!-- JmsXA Connector Pool
- <mbean name="JmsXAManagedConnectionPool" objectname="jboss.jca:service=ManagedConnectionPool,name=JmsXA">
- <attrib name="AvailableConnectionCount" alias="JmsXA_AvailConns" type="gauge"/>
- <attrib name="ConnectionCount" alias="JmsXA_Conns" type="gauge"/>
- <attrib name="InUseConnectionCount" alias="JmsXA_InUseConns" type="gauge"/>
- <attrib name="ConnectionCreatedCount" alias="JmsXA_CreatedConns" type="gauge"/>
- <attrib name="ConnectionDestroyedCount" alias="JmsXA_DestroyConns" type="gauge"/>
- </mbean-->
- <!-- Destination Manager
- <mbean name="MQ_DestinationManager" objectname="jboss.mq:service=DestinationManager">
- <attrib name="ClientCount" alias="MQ_ClientCount" type="gauge"/>
- </mbean-->
- <!-- an example of a Queue
- <mbean name="MQ_DestinationQueueA" objectname="jboss.mq.destination:service=Queue,name=A">
- <attrib name="QueueDepth" alias="A_QueueDepth" type="gauge"/>
- <attrib name="ReceiversCount" alias="A_QueueRecv" type="gauge"/>
- <attrib name="MaxDepth" alias="A_QueueMaxDepth" type="gauge"/>
- </mbean-->
- <!-- an example of a Topic
- <mbean name="SecuredTopic" objectname="jboss.mq.destination:service=Topic,name=securedTopic">
- <attrib name="DurableMessageCount" alias="ST_DurMsg" type="gauge"/>
- <attrib name="NonDurableMessageCount" alias="ST_NonDurMsg" type="gauge"/>
- <attrib name="NonDurableSubscriptionCount" alias="ST_NonDurSub" type="gauge"/>
- <attrib name="DurableSubscriptionCount" alias="ST_DurSub" type="gauge"/>
- <attrib name="AllMessageCount" alias="ST_AllMsg" type="gauge"/>
- <attrib name="MaxDepth" alias="ST_MaxDepth" type="gauge"/>
- <attrib name="AllSubscriptionsCount" alias="ST_AllSub" type="gauge"/>
- </mbean-->
- <!-- Global Request Processor -->
- <mbean name="GlobalRequestProcessor"
- objectname="jboss.web:type=GlobalRequestProcessor,name=http-0.0.0.0-8080">
- <attrib name="requestCount" alias="GRP_requests" type="counter"/>
- <attrib name="maxTime" alias="GRP_maxTime" type="gauge"/>
- <attrib name="bytesSent" alias="GRP_bytesSent" type="counter"/>
- <attrib name="bytesReceived" alias="GRP_bytesRec" type="counter"/>
- <attrib name="processingTime" alias="GRP_procTime" type="counter"/>
- <attrib name="errorCount" alias="GRP_errors" type="counter"/>
- </mbean>
- <!-- Thread Pool -->
- <mbean name="ThreadPool" objectname="jboss.web:type=ThreadPool,name=http-0.0.0.0-8080">
- <attrib name="currentThreadsBusy" alias="BusyThreads" type="gauge"/>
- <attrib name="currentThreadCount" alias="Threads" type="gauge"/>
- <attrib name="minSpareThreads" alias="MinSpareThreads" type="gauge"/>
- <attrib name="maxSpareThreads" alias="MaxSpareThreads" type="gauge"/>
- <attrib name="maxThreads" alias="MaxThreads" type="gauge"/>
- </mbean>
- </mbeans>
- </jmx-collection>
<jmx-collection name="jsr160">
<rrd step="300">
<rra>RRA:AVERAGE:0.5:1:2016</rra>
@@ -368,375 +299,23 @@
<attrib name="999thPercentile" alias="CasCluster1Req999" type="gauge"/>
<attrib name="Count" alias="CasCluster1ReqCnt" type="counter"/>
</mbean>
- </mbeans>
- </jmx-collection>
-
- <jmx-collection name="cassandra21x">
- <rrd step="300">
- <rra>RRA:AVERAGE:0.5:1:2016</rra>
- <rra>RRA:AVERAGE:0.5:12:1488</rra>
- <rra>RRA:AVERAGE:0.5:288:366</rra>
- <rra>RRA:MAX:0.5:288:366</rra>
- <rra>RRA:MIN:0.5:288:366</rra>
- </rrd>
- <mbeans>
-
- <!-- Clients -->
- <mbean name="org.apache.cassandra.metrics.Client"
- objectname="org.apache.cassandra.metrics:type=Client,name=connectedNativeClients">
- <attrib name="Value" alias="clntConNativeClnts" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Client"
- objectname="org.apache.cassandra.metrics:type=Client,name=connectedThriftClients">
- <attrib name="Value" alias="clntConThriftClnts" type="gauge"/>
- </mbean>
-
- <!-- Compaction -->
- <mbean name="org.apache.cassandra.metrics.Compaction"
- objectname="org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted">
- <attrib name="Count" alias="cpctBytesCompacted" type="counter"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Compaction"
- objectname="org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks">
- <attrib name="Value" alias="cpctCompletedTasks" type="counter"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Compaction"
- objectname="org.apache.cassandra.metrics:type=Compaction,name=PendingTasks">
- <attrib name="Value" alias="cpctPendingTasks" type="gauge"/>
- </mbean>
-
- <!-- Storage -->
- <mbean name="org.apache.cassandra.metrics.Storage"
- objectname="org.apache.cassandra.metrics:type=Storage,name=Load">
- <attrib name="Count" alias="strgLoad" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Storage"
- objectname="org.apache.cassandra.metrics:type=Storage,name=Exceptions">
- <attrib name="Count" alias="strgExceptions" type="counter"/>
- </mbean>
-
- <!-- Dropped Messages -->
- <mbean name="org.apache.cassandra.metrics.DroppedMessage"
- objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=READ,name=Dropped">
- <attrib name="Count" alias="drpdMsgRead" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.DroppedMessage"
- objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR,name=Dropped">
- <attrib name="Count" alias="drpdMsgReadRepair" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.DroppedMessage"
- objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RESPONSE,name=Dropped">
- <attrib name="Count" alias="drpdMsgReqResp" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.DroppedMessage"
- objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_SLICE,name=Dropped">
- <attrib name="Count" alias="drpdMsgRangeSlice" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.DroppedMessage"
- objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION,name=Dropped">
- <attrib name="Count" alias="drpdMsgMutation" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.DroppedMessage"
- objectname="org.apache.cassandra.metrics:type=DroppedMessage,scope=PAGED_RANGE,name=Dropped">
- <attrib name="Count" alias="drpdMsgPagedRange" type="gauge"/>
- </mbean>
-
- <!-- ThreadPools :: MemtableFlushWriter -->
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=ActiveTasks">
- <attrib name="Value" alias="tpIntMemTblFlsWrAt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=CurrentlyBlockedTasks">
- <attrib name="Count" alias="tpIntMemTblFlsWrCbt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=PendingTasks">
- <attrib name="Value" alias="tpIntMemTblFlsWrPt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtableFlushWriter,name=CompletedTasks">
- <attrib name="Count" alias="tpIntMemTblFlsWrCt" type="counter"/>
- </mbean>
-
- <!-- ThreadPools :: MemtablePostFlush -->
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=ActiveTasks">
- <attrib name="Value" alias="tpIntMemTblPoFlsAt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=CurrentlyBlockedTasks">
- <attrib name="Count" alias="tpIntMemTblPoFlsCbt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=PendingTasks">
- <attrib name="Value" alias="tpIntMemTblPoFlsPt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MemtablePostFlush,name=CompletedTasks">
- <attrib name="Count" alias="tpIntMemTblPoFlsCt" type="counter"/>
- </mbean>
-
- <!-- ThreadPools :: AntiEntropyStage -->
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks">
- <attrib name="Value" alias="tpIntAntiEntStgeAt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=CurrentlyBlockedTasks">
- <attrib name="Count" alias="tpIntAntiEntStgeCbt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=PendingTasks">
- <attrib name="Value" alias="tpIntAntiEntStgePt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=CompletedTasks">
- <attrib name="Value" alias="tpIntAntiEntStgeCt" type="counter"/>
- </mbean>
-
- <!-- ThreadPools :: GossipStage -->
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=ActiveTasks">
- <attrib name="Value" alias="tpIntGosStgeAt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=CurrentlyBlockedTasks">
- <attrib name="Count" alias="tpIntGosStgeCbt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=PendingTasks">
- <attrib name="Value" alias="tpIntGosStgePt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=GossipStage,name=CompletedTasks">
- <attrib name="Value" alias="tpIntGosStgeCt" type="counter"/>
- </mbean>
-
- <!-- ThreadPools :: MigrationStage -->
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=ActiveTasks">
- <attrib name="Value" alias="tpIntMigStgeAt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=CurrentlyBlockedTasks">
- <attrib name="Count" alias="tpIntMigStgeCbt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=PendingTasks">
- <attrib name="Value" alias="tpIntMigStgePt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MigrationStage,name=CompletedTasks">
- <attrib name="Value" alias="tpIntMigStgeCt" type="counter"/>
- </mbean>
-
- <!-- ThreadPools :: MiscStage -->
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MiscStage,name=ActiveTasks">
- <attrib name="Value" alias="tpIntMiscStgeAt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MiscStage,name=CurrentlyBlockedTasks">
- <attrib name="Count" alias="tpIntMiscStgeCbt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MiscStage,name=PendingTasks">
- <attrib name="Value" alias="tpIntMiscStgePt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=MiscStage,name=CompletedTasks">
- <attrib name="Value" alias="tpIntMiscStgeCt" type="counter"/>
- </mbean>
-
- <!-- ThreadPools :: MutationStage -->
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=ActiveTasks">
- <attrib name="Value" alias="tpMutStgeAt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks">
- <attrib name="Value" alias="tpMutStgeCbt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks">
- <attrib name="Value" alias="tpMutStgePt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CompletedTasks">
- <attrib name="Value" alias="tpMutStgeCt" type="counter"/>
- </mbean>
-
- <!-- ThreadPools :: ReadStage -->
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=ActiveTasks">
- <attrib name="Value" alias="tpReadStageAt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks">
- <attrib name="Value" alias="tpReadStageCbt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks">
- <attrib name="Value" alias="tpReadStagePt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CompletedTasks">
- <attrib name="Value" alias="tpReadStageCt" type="counter"/>
- </mbean>
-
- <!-- ThreadPools :: RequestResponseStage -->
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=ActiveTasks">
- <attrib name="Value" alias="tpReqRespStgeAt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks">
- <attrib name="Value" alias="tpReqRespStgeCbt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks">
- <attrib name="Value" alias="tpReqRespStgePt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CompletedTasks">
- <attrib name="Value" alias="tpReqRespStgeCt" type="counter"/>
- </mbean>
-
- <!-- ThreadPools :: ReadRepairStage -->
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=ActiveTasks">
- <attrib name="Value" alias="tpReadRepairStgeAt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks">
- <attrib name="Count" alias="tpReadRepairStgeCbt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks">
- <attrib name="Value" alias="tpReadRepairStgePt" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.ThreadPools"
- objectname="org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CompletedTasks">
- <attrib name="Value" alias="tpReadRepairStgeCt" type="counter"/>
+ <mbean name="org.apache.activemq" objectname="org.apache.activemq:type=Broker,brokerName=localhost">
+ <attrib name="TotalConnectionsCount" alias="TtlConCnt" type="gauge"/>
+ <attrib name="TotalEnqueueCount" alias="TtlEnqCnt" type="gauge"/>
+ <attrib name="TotalDequeueCount" alias="TtlDeqCnt" type="gauge"/>
+ <attrib name="TotalConsumerCount" alias="TtlConsumerCnt" type="gauge"/>
+ <attrib name="TotalProducerCount" alias="TtlProdCnt" type="gauge"/>
+ <attrib name="TotalMessageCount" alias="TtlMsgCnt" type="gauge"/>
+ <attrib name="AverageMessageSize" alias="AvgMsgSize" type="gauge"/>
+ <attrib name="MaxMessageSize" alias="MaxMsgSize" type="gauge"/>
+ <attrib name="MinMessageSize" alias="MinMsgSize" type="gauge"/>
+ <attrib name="MemoryLimit" alias="MemLimit" type="gauge"/>
+ <attrib name="MemoryPercentUsage" alias="MemPctUsage" type="gauge"/>
+ <attrib name="StoreLimit" alias="StoreLimit" type="gauge"/>
+ <attrib name="StorePercentUsage" alias="StorePctUsage" type="gauge"/>
+ <attrib name="TempLimit" alias="TempLimit" type="gauge"/>
+ <attrib name="TempPercentUsage" alias="TempPctUsage" type="gauge"/>
</mbean>
</mbeans>
</jmx-collection>
-
- <jmx-collection name="cassandra21x-newts">
- <rrd step="300">
- <rra>RRA:AVERAGE:0.5:1:2016</rra>
- <rra>RRA:AVERAGE:0.5:12:1488</rra>
- <rra>RRA:AVERAGE:0.5:288:366</rra>
- <rra>RRA:MAX:0.5:288:366</rra>
- <rra>RRA:MIN:0.5:288:366</rra>
- </rrd>
- <mbeans>
- <!-- Newts :: AllMemmtables -->
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=AllMemtablesLiveDataSize">
- <attrib name="Value" alias="alMemTblLiDaSi" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=AllMemtablesOffHeapDataSize">
- <attrib name="Value" alias="alMemTblOffHeapDaSi" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=AllMemtablesOnHeapDataSize">
- <attrib name="Value" alias="alMemTblOnHeapDaSi" type="gauge"/>
- </mbean>
-
- <!-- Memtable :: Count -->
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=MemtableSwitchCount">
- <attrib name="Value" alias="memTblSwitchCount" type="gauge"/>
- </mbean>
-
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=MemtableColumnsCount">
- <attrib name="Value" alias="memTblColumnsCnt" type="gauge"/>
- </mbean>
-
- <!-- Memtable :: Sizes -->
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=MemtableLiveDataSize">
- <attrib name="Value" alias="memTblLiveDaSi" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=MemtableOffHeapDataSize">
- <attrib name="Value" alias="memTblOffHeapDaSi" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=MemtableOnHeapDataSize">
- <attrib name="Value" alias="memTblOnHeapDaSi" type="gauge"/>
- </mbean>
-
- <!-- Latency -->
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=ReadTotalLatency">
- <attrib name="Count" alias="readTotLtncy" type="counter"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=RangeLatency">
- <attrib name="99thPercentile" alias="rangeLtncy99" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=WriteTotalLatency">
- <attrib name="Count" alias="writeTotLtncy" type="counter"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=CasCommitTotalLatency">
- <attrib name="Count" alias="casCommitTotLtncy" type="counter"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=CasPrepareTotalLatency">
- <attrib name="Count" alias="casPrepareTotLtncy" type="counter"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=CasProposeTotalLatency">
- <attrib name="Count" alias="casProposeTotLtncy" type="counter"/>
- </mbean>
-
- <!-- Bloom Filter -->
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=BloomFilterDiskSpaceUsed">
- <attrib name="Value" alias="blmFltrDskSpcUsed" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=BloomFilterOffHeapMemoryUsed">
- <attrib name="Value" alias="blmFltrOffHeapMemUs" type="gauge"/>
- </mbean>
-
- <!-- Memory Used -->
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=CompressionMetadataOffHeapMemoryUsed">
- <attrib name="Value" alias="cmpMetaOffHeapMemUs" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=IndexSummaryOffHeapMemoryUsed">
- <attrib name="Value" alias="idxSumOffHeapMemUs" type="gauge"/>
- </mbean>
-
- <!-- Pending -->
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=PendingCompactions">
- <attrib name="Value" alias="pendingCompactions" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=PendingFlushes">
- <attrib name="Value" alias="pendingFlushes" type="gauge"/>
- </mbean>
-
- <!-- Disk Space -->
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=TotalDiskSpaceUsed">
- <attrib name="Value" alias="totalDiskSpaceUsed" type="gauge"/>
- </mbean>
- <mbean name="org.apache.cassandra.metrics.Keyspace"
- objectname="org.apache.cassandra.metrics:type=Keyspace,keyspace=newts,name=LiveDiskSpaceUsed">
- <attrib name="Value" alias="liveDiskSpaceUsed" type="gauge"/>
- </mbean>
- </mbeans>
- </jmx-collection>
-
</jmx-datacollection-config>
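The replacement org.apache.activemq entry above reads broker-wide totals from the default localhost broker. ActiveMQ also publishes per-destination statistics under an extended objectname; a per-queue entry would look like the sketch below (queue name and aliases are illustrative, and the brokerName key must match the actual broker):

      <mbean name="org.apache.activemq.queue"
             objectname="org.apache.activemq:type=Broker,brokerName=localhost,destinationType=Queue,destinationName=MyQueue">
        <attrib name="QueueSize" alias="MyQueueSize" type="gauge"/>
        <attrib name="EnqueueCount" alias="MyQueueEnq" type="gauge"/>
        <attrib name="DequeueCount" alias="MyQueueDeq" type="gauge"/>
      </mbean>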
diff --git a/jmx.acl.cfg b/jmx.acl.cfg
index 531d8c4..ec42f3d 100644
--- a/jmx.acl.cfg
+++ b/jmx.acl.cfg
@@ -70,4 +70,4 @@ list* = viewer
get* = viewer
is* = viewer
set* = admin
-* = admin
\ No newline at end of file
+* = admin
diff --git a/jmx.acl.java.lang.Memory.cfg b/jmx.acl.java.lang.Memory.cfg
index 60e6275..a58bcf5 100644
--- a/jmx.acl.java.lang.Memory.cfg
+++ b/jmx.acl.java.lang.Memory.cfg
@@ -22,4 +22,4 @@
#
# For a description of the format of this file, see jmx.acl.cfg
#
-gc = manager
\ No newline at end of file
+gc = manager
diff --git a/jmx.acl.org.apache.karaf.bundle.cfg b/jmx.acl.org.apache.karaf.bundle.cfg
index dd318d8..b8ba95a 100644
--- a/jmx.acl.org.apache.karaf.bundle.cfg
+++ b/jmx.acl.org.apache.karaf.bundle.cfg
@@ -37,4 +37,4 @@ uninstall(java.lang.String)["0"] = #this is a comment, no roles can perform this
uninstall = admin
update(java.lang.String)[/([1-4])?[0-9]/] = admin
update(java.lang.String,java.lang.String)[/([1-4])?[0-9]/,/.*/] = admin
-update = manager
\ No newline at end of file
+update = manager
diff --git a/jmx.acl.org.apache.karaf.config.cfg b/jmx.acl.org.apache.karaf.config.cfg
index a597112..e6df22b 100644
--- a/jmx.acl.org.apache.karaf.config.cfg
+++ b/jmx.acl.org.apache.karaf.config.cfg
@@ -49,4 +49,4 @@ setProperty(java.lang.String,java.lang.String,java.lang.String) = manager
update(java.lang.String,java.util.Map)[/jmx[.]acl.*/,/.*/] = admin
update(java.lang.String,java.util.Map)[/org[.]apache[.]karaf[.]command[.]acl.+/,/.*/] = admin
update(java.lang.String,java.util.Map)[/org[.]apache[.]karaf[.]service[.]acl.+/,/.*/] = admin
-update(java.lang.String,java.util.Map) = manager
\ No newline at end of file
+update(java.lang.String,java.util.Map) = manager
diff --git a/jmx.acl.org.apache.karaf.security.jmx.cfg b/jmx.acl.org.apache.karaf.security.jmx.cfg
index 0af2c96..22471e5 100644
--- a/jmx.acl.org.apache.karaf.security.jmx.cfg
+++ b/jmx.acl.org.apache.karaf.security.jmx.cfg
@@ -24,4 +24,4 @@
#
# For a description of the format of this file, see jmx.acl.cfg
#
-canInvoke = viewer
\ No newline at end of file
+canInvoke = viewer
diff --git a/jmx.acl.osgi.compendium.cm.cfg b/jmx.acl.osgi.compendium.cm.cfg
index 2d9045d..345d7b9 100644
--- a/jmx.acl.osgi.compendium.cm.cfg
+++ b/jmx.acl.osgi.compendium.cm.cfg
@@ -50,4 +50,4 @@ update(java.lang.String,javax.management.openmbean.TabularData) = manager
updateForLocation(java.lang.String,java.lang.String,javax.management.openmbean.TabularData)[/jmx[.]acl.*/,/.*/,/.*/] = admin
updateForLocation(java.lang.String,java.lang.String,javax.management.openmbean.TabularData)[/org[.]apache[.]karaf[.]command[.]acl[.].+/,/.*/,/.*/] = admin
updateForLocation(java.lang.String,java.lang.String,javax.management.openmbean.TabularData)[/org[.]apache[.]karaf[.]service[.]acl[.].+/,/.*/,/.*/] = admin
-updateForLocation(java.lang.String,java.lang.String,javax.management.openmbean.TabularData) = manager
\ No newline at end of file
+updateForLocation(java.lang.String,java.lang.String,javax.management.openmbean.TabularData) = manager
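The six jmx.acl* hunks above only restore the missing trailing newline; the rule syntax itself is unchanged. As the bundle and config examples show, Karaf matches rules of the form method(signature)[argument regexes] = roles, with more specific definitions taking precedence over wildcards. A hypothetical ACL file for a custom MBean domain would follow the same pattern:

    # jmx.acl.org.example.cfg (hypothetical)
    get* = viewer
    is* = viewer
    restart(java.lang.String)[/prod-.*/] = admin
    restart = manager
    set* = admin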
diff --git a/jmxremote.access b/jmxremote.access
index 6254ae3..55aad04 100644
--- a/jmxremote.access
+++ b/jmxremote.access
@@ -1 +1,2 @@
admin readwrite
+jmx readonly
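The new read-only jmx role in jmxremote.access only takes effect together with a matching entry in the companion jmxremote.password file, per the standard JDK remote-JMX convention; the passwords below are placeholders:

    admin <admin-password>
    jmx <jmx-password>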
diff --git a/jre.properties b/jre.properties
index ea0d6d7..94da9db 100644
--- a/jre.properties
+++ b/jre.properties
@@ -539,4 +539,4 @@ jre-1.8= \
org.w3c.dom.xpath, \
org.xml.sax, \
org.xml.sax.ext, \
- org.xml.sax.helpers
\ No newline at end of file
+ org.xml.sax.helpers
diff --git a/keys.properties b/keys.properties
index d51e633..a13d3e6 100644
--- a/keys.properties
+++ b/keys.properties
@@ -32,4 +32,4 @@
# The user guide describes how to generate/update the key.
#
#karaf=AAAAB3NzaC1kc3MAAACBAP1/U4EddRIpUt9KnC7s5Of2EbdSPO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVClpJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith1yrv8iIDGZ3RSAHHAAAAFQCXYFCPFSMLzLKSuYKi64QL8Fgc9QAAAIEA9+GghdabPd7LvKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImog9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoAAACBAKKSU2PFl/qOLxIwmBZPPIcJshVe7bVUpFvyl3BbJDow8rXfskl8wO63OzP/qLmcJM0+JbcRU/53JjTuyk31drV2qxhIOsLDC9dGCWj47Y7TyhPdXh/0dthTRBy6bqGtRPxGa7gJov1xm/UuYYXPIUR/3x9MAZvZ5xvE0kYXO+rx,_g_:admingroup
-_g_\:admingroup = group,admin,manager,viewer
\ No newline at end of file
+_g_\:admingroup = group,admin,manager,viewer
diff --git a/libraries.properties b/libraries.properties
index cdc8199..99f1e82 100644
--- a/libraries.properties
+++ b/libraries.properties
@@ -1,4 +1,4 @@
-#Tue Feb 14 08:52:48 UTC 2017
+#Tue Feb 14 08:33:34 UTC 2017
opennms.library.jicmp=/usr/lib64/libjicmp.so
opennms.library.jrrd2=/usr/lib64/libjrrd2.so
opennms.library.jicmp6=/usr/lib64/libjicmp6.so
diff --git a/log4j2.xml b/log4j2.xml
index 78a3628..de3368b 100644
--- a/log4j2.xml
+++ b/log4j2.xml
@@ -99,6 +99,9 @@
<logger name="org.apache.bsf" additivity="false" level="INFO">
<appender-ref ref="RoutingAppender"/>
</logger>
+ <logger name="org.apache.camel.component.jms" additivity="false" level="INFO">
+ <appender-ref ref="RoutingAppender"/>
+ </logger>
<logger name="org.apache.commons" additivity="false" level="WARN">
<appender-ref ref="RoutingAppender"/>
</logger>
@@ -166,44 +169,45 @@
<DynamicThresholdFilter key="prefix" defaultThreshold="DEBUG">
<!-- always leave instrumentation logging at INFO -->
<KeyValuePair key="instrumentation" value="INFO" />
- <KeyValuePair key="access-point-monitor" value="WARN" />
- <KeyValuePair key="ackd" value="WARN" />
- <KeyValuePair key="actiond" value="WARN" />
- <KeyValuePair key="alarmd" value="WARN" />
- <KeyValuePair key="asterisk-gateway" value="WARN" />
- <KeyValuePair key="archiver" value="WARN" />
- <KeyValuePair key="bsmd" value="WARN" />
- <KeyValuePair key="collectd" value="WARN" />
- <KeyValuePair key="correlator" value="WARN" />
- <KeyValuePair key="dhcpd" value="WARN" />
- <KeyValuePair key="discovery" value="WARN" />
- <KeyValuePair key="eventd" value="WARN" />
- <KeyValuePair key="event-translator" value="WARN" />
- <KeyValuePair key="icmp" value="WARN" />
- <KeyValuePair key="jetty-server" value="WARN" />
- <KeyValuePair key="enlinkd" value="WARN" />
+ <KeyValuePair key="access-point-monitor" value="DEBUG" />
+ <KeyValuePair key="ackd" value="DEBUG" />
+ <KeyValuePair key="actiond" value="DEBUG" />
+ <KeyValuePair key="alarmd" value="DEBUG" />
+ <KeyValuePair key="asterisk-gateway" value="DEBUG" />
+ <KeyValuePair key="archiver" value="DEBUG" />
+ <KeyValuePair key="bsmd" value="DEBUG" />
+ <KeyValuePair key="collectd" value="DEBUG" />
+ <KeyValuePair key="correlator" value="DEBUG" />
+ <KeyValuePair key="dhcpd" value="DEBUG" />
+ <KeyValuePair key="discovery" value="DEBUG" />
+ <KeyValuePair key="eventd" value="DEBUG" />
+ <KeyValuePair key="event-translator" value="DEBUG" />
+ <KeyValuePair key="icmp" value="DEBUG" />
+ <KeyValuePair key="ipc" value="DEBUG" />
+ <KeyValuePair key="jetty-server" value="DEBUG" />
+ <KeyValuePair key="enlinkd" value="DEBUG" />
<KeyValuePair key="manager" value="DEBUG" />
- <KeyValuePair key="map" value="WARN" />
- <KeyValuePair key="notifd" value="WARN" />
- <KeyValuePair key="oss-qosd" value="WARN" />
- <KeyValuePair key="oss-qosdrx" value="WARN" />
- <KeyValuePair key="passive" value="WARN" />
- <KeyValuePair key="poller" value="WARN" />
- <KeyValuePair key="provisiond" value="WARN" />
- <KeyValuePair key="queued" value="WARN" />
- <KeyValuePair key="reportd" value="WARN" />
- <KeyValuePair key="reports" value="WARN" />
- <KeyValuePair key="rtc" value="WARN" />
- <KeyValuePair key="statsd" value="WARN" />
- <KeyValuePair key="scriptd" value="WARN" />
- <KeyValuePair key="snmp-poller" value="WARN" />
- <KeyValuePair key="syslogd" value="WARN" />
- <KeyValuePair key="threshd" value="WARN" />
- <KeyValuePair key="tl1d" value="WARN" />
- <KeyValuePair key="trapd" value="WARN" />
- <KeyValuePair key="trouble-ticketer" value="WARN" />
- <KeyValuePair key="vacuumd" value="WARN" />
- <KeyValuePair key="web" value="WARN" />
+ <KeyValuePair key="map" value="DEBUG" />
+ <KeyValuePair key="notifd" value="DEBUG" />
+ <KeyValuePair key="oss-qosd" value="DEBUG" />
+ <KeyValuePair key="oss-qosdrx" value="DEBUG" />
+ <KeyValuePair key="passive" value="DEBUG" />
+ <KeyValuePair key="poller" value="DEBUG" />
+ <KeyValuePair key="provisiond" value="DEBUG" />
+ <KeyValuePair key="queued" value="DEBUG" />
+ <KeyValuePair key="reportd" value="DEBUG" />
+ <KeyValuePair key="reports" value="DEBUG" />
+ <KeyValuePair key="rtc" value="DEBUG" />
+ <KeyValuePair key="statsd" value="DEBUG" />
+ <KeyValuePair key="scriptd" value="DEBUG" />
+ <KeyValuePair key="snmp-poller" value="DEBUG" />
+ <KeyValuePair key="syslogd" value="DEBUG" />
+ <KeyValuePair key="threshd" value="DEBUG" />
+ <KeyValuePair key="tl1d" value="DEBUG" />
+ <KeyValuePair key="trapd" value="DEBUG" />
+ <KeyValuePair key="trouble-ticketer" value="DEBUG" />
+ <KeyValuePair key="vacuumd" value="DEBUG" />
+ <KeyValuePair key="web" value="DEBUG" />
</DynamicThresholdFilter>
<appender-ref ref="RoutingAppender"/>
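19.0.0 flips these per-daemon DynamicThresholdFilter thresholds from WARN to DEBUG, so an upgraded system logs far more out of the box. A minimal sketch for quieting a single daemon again (collectd is an arbitrary example) is to set its KeyValuePair in etc/log4j2.xml back to the 18.x value:

    <KeyValuePair key="collectd" value="WARN" />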
diff --git a/magic-users.properties b/magic-users.properties
deleted file mode 100644
index 1461809..0000000
--- a/magic-users.properties
+++ /dev/null
@@ -1,75 +0,0 @@
-############################################################################
-# magic-users.properties
-#--------------------------------------------------------------------------
-# This file defines the secondary authentication and authorization info for
-# the Web UI.
-#
-# The "magic" users set up here will take priority over users created in the
-# users.xml. For example, if you create a user in users.xml called "rtc",
-# its password will be ignored in favor of the password here, et cetera.
-#
-# The authorization roles set up here are all the roles available to the
-# system except for the default role: OpenNMS User. All users belong to
-# the OpenNMS User role, even the "magic" users defined in this file, unless
-# they are a member of one or more roles in this file *and* all of the roles
-# they are a member of have "notInDefaultGroup" set to true. All
-# other roles specify their own user list within this file.
-#
-###########################################################################
-
-
-###########################################################################
-## U S E R S
-###########################################################################
-
-# A comma-separated list of user keys. A user.{KEY}.username and
-# user.{KEY}.password property must be set for each key in this property.
-users=rtc
-
-# The RTC View Control Manager daemon uses this user to authenticate itself
-# while sending RTC data posts.
-user.rtc.username=rtc
-user.rtc.password=rtc
-
-###########################################################################
-## R O L E S
-###########################################################################
-
-# A comma-separated list of role keys. A role.{KEY}.name and
-# role.{KEY}.users property must be set for each key in this property.
-roles=rtc, admin, rouser, dashboard, provision, remoting, rest, asset, mobile
-
-# This role allows a user to make RTC data posts.
-role.rtc.name=OpenNMS RTC Daemon
-role.rtc.users=rtc
-role.rtc.notInDefaultGroup=true
-
-# This role allows users access to configuration and
-# administrative web pages.
-role.admin.name=OpenNMS Administrator
-role.admin.users=admin
-
-# This role disallows user write access
-role.rouser.name=OpenNMS Read-Only User
-role.rouser.users=
-
-# This role allows access to the dashboard only
-role.dashboard.name=OpenNMS Dashboard User
-role.dashboard.users=
-role.dashboard.notInDefaultGroup=true
-
-# This role allows edit access to the Asset Editor GUI
-role.asset.name=OpenNMS Asset Editor
-role.asset.users=
-
-role.provision.name=OpenNMS Provision User
-role.provision.users=
-
-role.remoting.name=OpenNMS Remote Poller User
-role.remoting.users=
-
-role.rest.name=OpenNMS REST User
-role.rest.users=iphone
-
-role.mobile.name=OpenNMS Mobile User
-role.mobile.users=
diff --git a/opennms-activemq.xml b/opennms-activemq.xml
index 3cf0384..72f99dc 100644
--- a/opennms-activemq.xml
+++ b/opennms-activemq.xml
@@ -36,6 +36,28 @@
The <broker> element is used to configure the ActiveMQ broker.
-->
<broker xmlns="http://activemq.apache.org/schema/core" brokerName="localhost" dataDirectory="${activemq.data}">
+ <plugins>
+ <bean id="openNMSJaasBrokerPlugin" class="org.opennms.netmgt.activemq.auth.OpenNMSJaasBrokerPlugin" xmlns="http://www.springframework.org/schema/beans" />
+
+ <authorizationPlugin>
+ <map>
+ <authorizationMap>
+ <authorizationEntries>
+ <!-- Users in the admin role can read/write/create any queue/topic -->
+ <authorizationEntry queue=">" read="admin" write="admin" admin="admin" />
+ <authorizationEntry topic=">" read="admin" write="admin" admin="admin"/>
+ <!-- Users in the minion role can write/create queues that are not keyed by location -->
+ <authorizationEntry queue="OpenNMS.*.*" write="minion" admin="minion" />
+ <!-- Users in the minion role can read/create from queues that are keyed by location -->
+ <authorizationEntry queue="OpenNMS.*.*.*" read="minion" admin="minion" />
+ <!-- Users in the minion role can read/write/create advisory topics -->
+ <authorizationEntry topic="ActiveMQ.Advisory.>" read="minion" write="minion" admin="minion"/>
+ </authorizationEntries>
+ <!-- Allow all users to read/write/create temporary destinations (by omitting a <tempDestinationAuthorizationEntry>) -->
+ </authorizationMap>
+ </map>
+ </authorizationPlugin>
+ </plugins>
<!--
For better performance use VM cursor and small memory limit.
@@ -135,7 +157,7 @@
<!-- Uncomment this line to allow external TCP connections -->
<!--
WARNING: Access to port 61616 should be firewalled to prevent unauthorized injection
- tof data into OpenNMS when this port is open.
+ of data into OpenNMS when this port is open.
-->
<!-- <transportConnector name="openwire" uri="tcp://0.0.0.0:61616?useJmx=false&amp;maximumConnections=1000&amp;wireformat.maxFrameSize=104857600"/> -->
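The new <plugins> block above wires JAAS authentication and per-destination authorization into the embedded broker, which matters once the openwire connector is opened for remote Minions. A sketch, assuming port 61616 stays firewalled as the warning says, is simply the connector line above with the comment removed:

    <transportConnector name="openwire" uri="tcp://0.0.0.0:61616?useJmx=false&amp;maximumConnections=1000&amp;wireformat.maxFrameSize=104857600"/>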
diff --git a/opennms-upgrade-status.properties b/opennms-upgrade-status.properties
index a6ee1c9..1b6ab34 100644
--- a/opennms-upgrade-status.properties
+++ b/opennms-upgrade-status.properties
@@ -1,12 +1,14 @@
-#Tue Feb 14 08:52:59 UTC 2017
-JettyConfigMigratorOffline=Tue Feb 14 08\:52\:57 UTC 2017
-DataCollectionConfigMigrator17Offline=Tue Feb 14 08\:52\:58 UTC 2017
-EOLServiceConfigMigratorOffline=Tue Feb 14 08\:52\:59 UTC 2017
-RequisitionsMigratorOffline=Tue Feb 14 08\:52\:56 UTC 2017
-KscReportsMigrator=Tue Feb 14 08\:52\:57 UTC 2017
-DiscoveryConfigurationMigratorOffline=Tue Feb 14 08\:52\:59 UTC 2017
-ServiceConfig1701MigratorOffline=Tue Feb 14 08\:52\:57 UTC 2017
-DataCollectionConfigMigratorOffline=Tue Feb 14 08\:52\:57 UTC 2017
-ServiceConfigMigratorOffline=Tue Feb 14 08\:52\:57 UTC 2017
-JmxRrdMigratorOffline=Tue Feb 14 08\:52\:57 UTC 2017
-MonitoringLocationsMigratorOffline=Tue Feb 14 08\:52\:57 UTC 2017
+#Tue Feb 14 08:33:44 UTC 2017
+JettyConfigMigratorOffline=Tue Feb 14 08\:33\:41 UTC 2017
+DataCollectionConfigMigrator17Offline=Tue Feb 14 08\:33\:43 UTC 2017
+EOLServiceConfigMigratorOffline=Tue Feb 14 08\:33\:43 UTC 2017
+RequisitionsMigratorOffline=Tue Feb 14 08\:33\:41 UTC 2017
+DiscoveryConfigurationLocationMigratorOffline=Tue Feb 14 08\:33\:44 UTC 2017
+KscReportsMigrator=Tue Feb 14 08\:33\:42 UTC 2017
+MagicUsersMigratorOffline=Tue Feb 14 08\:33\:43 UTC 2017
+DiscoveryConfigurationMigratorOffline=Tue Feb 14 08\:33\:43 UTC 2017
+ServiceConfig1701MigratorOffline=Tue Feb 14 08\:33\:42 UTC 2017
+DataCollectionConfigMigratorOffline=Tue Feb 14 08\:33\:42 UTC 2017
+ServiceConfigMigratorOffline=Tue Feb 14 08\:33\:42 UTC 2017
+JmxRrdMigratorOffline=Tue Feb 14 08\:33\:41 UTC 2017
+MonitoringLocationsMigratorOffline=Tue Feb 14 08\:33\:42 UTC 2017
diff --git a/opennms.properties b/opennms.properties
index a0fe8e7..9813cff 100644
--- a/opennms.properties
+++ b/opennms.properties
@@ -28,15 +28,18 @@
# the default ICMP implementation used in the remote poller, since it does
# not rely on any external native code to be installed outside of the JVM.
#
-# To use the JNI ICMPv4 interface only, use the following property setting:
+# To use the JNI ICMPv4/ICMPv6 implementation, use the following property:
+#org.opennms.netmgt.icmp.pingerClass=org.opennms.netmgt.icmp.jni6.Jni6Pinger
+#
+# To use the JNI ICMPv4 interface only, use the following property:
#org.opennms.netmgt.icmp.pingerClass=org.opennms.netmgt.icmp.jni.JniPinger
#
# To use the JNA ICMPv4/ICMPv6 implementation, use the following property:
#org.opennms.netmgt.icmp.pingerClass=org.opennms.netmgt.icmp.jna.JnaPinger
#
-# The default is set to use the JNI ICMPv4/ICMPv6 interface like so:
-#org.opennms.netmgt.icmp.pingerClass=org.opennms.netmgt.icmp.jni6.Jni6Pinger
-
+# If no pingerClass is set, OpenNMS will attempt to choose the best
+# available pinger automatically.
+#
# By default, OpenNMS will start up if either ICMPv4 *or* ICMPv6 are
# available and initialize properly. If you wish to force IPv4 or IPv6
# explicitly, set one or both of these properties to true.
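A minimal sketch of pinning one of the implementations documented above, e.g. the JNA pinger when the native JNI libraries are not installed, is to uncomment the matching property:

    org.opennms.netmgt.icmp.pingerClass=org.opennms.netmgt.icmp.jna.JnaPinger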
@@ -219,10 +222,10 @@ opennms.ticketer.plugin=org.opennms.netmgt.ticketd.NullTicketerPlugin
#opennms.alarmTroubleTicketLinkTemplate = <a href="http://172.20.0.76:8180/arsys/servlet/ViewFormServlet?form=HPD:Help%20Desk&server=itts3h&qual='Incident ID*%2B'=%22${id}%22">${id}</a>
# Enable this flag to ignore 'uei.opennms.org/troubleTicket/create' events against alarms with a severity of 'Cleared'
-#opennms.ticketer.skipCreateWhenCleared = false
+#opennms.ticketer.skipCreateWhenCleared = true
# Enable this flag to ignore 'uei.opennms.org/troubleTicket/close' events against alarms with a severity other than 'Cleared'
-#opennms.ticketer.skipCloseWhenNotCleared = false
+#opennms.ticketer.skipCloseWhenNotCleared = true
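The commented example values for both ticketer guards change from false to true here, suggesting new defaults; a sketch of setting them explicitly rather than relying on the default:

    opennms.ticketer.skipCreateWhenCleared = true
    opennms.ticketer.skipCloseWhenNotCleared = true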
###### MISCELLANEOUS ######
@@ -342,17 +345,6 @@ org.eclipse.jetty.server.Request.maxFormKeys=2000
## in the keystore specified by the https-keystore property), uncomment and
## change this property.
#org.opennms.netmgt.jetty.https-keypassword = changeit
-## To specify a particular SSL certificate alias in the keystore, set this
-## property. Otherwise, the first certificate that is found will be used.
-#org.opennms.netmgt.jetty.https-cert-alias = opennms-jetty-certificate
-## To exclude specific SSL/TLS cipher suites from use, set this property to a
-## colon-separated list of suite names. Whitespace surrounding colons is OK.
-#org.opennms.netmgt.jetty.https-exclude-cipher-suites=SSL_DHE_DSS_WITH_DES_CBC_SHA: \
-# SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA:SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA: \
-# SSL_DHE_RSA_WITH_DES_CBC_SHA:SSL_RSA_EXPORT_WITH_DES40_CBC_SHA: \
-# SSL_RSA_EXPORT_WITH_RC4_40_MD5:SSL_RSA_WITH_3DES_EDE_CBC_SHA: \
-# SSL_RSA_WITH_DES_CBC_SHA:TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA: \
-# TLS_RSA_EXPORT_WITH_DES40_CBC_SHA:TLS_RSA_WITH_DES_CBC_SHA
# If OpenNMS is setting an incorrect HTML <base> tag, you can override
# its idea of the local URL here. The URL must end with a '/'. The following
@@ -441,6 +433,10 @@ gnu.io.SerialPorts=/dev/ttyACM0:/dev/ttyACM1:/dev/ttyACM2:/dev/ttyACM3:/dev/ttyA
# you will get a phone timed out exception
smslib.serial.polling=true
+###### EVENTD OPTIONS ######
+# This property is used to define the size of the event parsing cache. The size must be >= 0, where 0 disables caching.
+#org.opennms.eventd.eventTemplateCacheSize = 1000
+
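A sketch of enabling the new event parsing cache with an explicit size (5000 is an arbitrary illustration; per the comment, the value must be >= 0 and 0 disables caching):

    org.opennms.eventd.eventTemplateCacheSize = 5000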
###### PROVISIOND OPTIONS ######
#
# This property is used to enable/disable the handling of new suspect events
@@ -604,7 +600,7 @@ opennms.eventlist.showCount=false
###### JasperReports Properties ######
# Defines the Version of the Jasperreports library
-org.opennms.jasperReportsVersion=6.1.1
+org.opennms.jasperReportsVersion=6.3.0
# Define if duplicates are ignored when using pie charts within a JasperReport template (*.jrxml) file.
# See http://jasperreports.sourceforge.net/config.reference.html for more details.
@@ -612,7 +608,7 @@ net.sf.jasperreports.chart.pie.ignore.duplicated.key=true
###### Web Console - Front Page ######
# This value controls the content that will be displayed in the middle box of the front page.
-# The default is the view of SLM/RTC categories: /includes/categories-box.jsp.
+# The default is the view of SLM/RTC categories and the geographical map: /includes/categories-box.jsp,/geomap/map-box.jsp.
# You can also use a comma-separated list of files to include more than one content file.
# Uncomment the following line to display the widget for the surveillance view from the dashboard.
# (It uses the same rules for the dashboard)
@@ -628,14 +624,6 @@ net.sf.jasperreports.chart.pie.ignore.duplicated.key=true
# services, you may need to add them to this list.
excludeServiceMonitorsFromRemotePoller=DHCP,NSClient,RadiusAuth,XMP
-###### DASHBOARD/SURVEILLANCE VIEW IMPLEMENTATION ######
-# OpenNMS provides two different dashboard/surveillance view implementations. The GWT
-# variant is the original. Later, the UI was rewitten using the VAADIN framework. So, the
-# two valid options fpr this option are 'vaadin' or 'gwt'. The VAADIN implementation is
-# the default one. Please note that the GWT version is deprecated and will be removed in
-# future versions.
-#org.opennms.dashboard.implementation=gwt
-
###### DASHBOARD LANDING PAGE ######
# This setting controls whether users will be redirected to the dashboard page after
# a successful login. The two valid options for this are 'true' or 'false' which is
@@ -731,6 +719,15 @@ excludeServiceMonitorsFromRemotePoller=DHCP,NSClient,RadiusAuth,XMP
# account when generating the alarm-based heatmap.
#org.opennms.heatmap.onlyUnacknowledged=false
+# ###### GEOMAP BOX ######
+# This setting controls how each node's status is calculated.
+# Valid options are Alarms or Outages
+#org.opennms.geomap.defaultStrategy=Alarms
+
+# This setting controls the minimum severity a node must have to be shown.
+# By default all nodes with a severity >= Normal are shown.
+#org.opennms.geomap.defaultSeverity=Normal
+
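A sketch of overriding the new geomap defaults using the options documented above (Outages is the documented alternative strategy; Warning is assumed here to be the next severity above Normal):

    org.opennms.geomap.defaultStrategy=Outages
    org.opennms.geomap.defaultSeverity=Warning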
# ###### GRAFANA BOX ######
# This setting controls whether a grafana box showing the available dashboards is
# placed on the landing page. The two valid options for this are 'true' or 'false'
@@ -757,3 +754,31 @@ excludeServiceMonitorsFromRemotePoller=DHCP,NSClient,RadiusAuth,XMP
# Timeouts for contacting the grafana server
#org.opennms.grafanaBox.connectionTimeout=500
#org.opennms.grafanaBox.soTimeout=500
+
+# ###### ActiveMQ Settings ######
+# These settings are used to control which ActiveMQ broker will be used.
+# By default, we use an embedded broker.
+#
+#org.opennms.activemq.broker.disable=false
+#org.opennms.activemq.broker.url=vm://localhost?create=false
+#org.opennms.activemq.broker.username=
+#org.opennms.activemq.broker.password=
+#org.opennms.activemq.client.max-connections=8
+#org.opennms.activemq.client.concurrent-consumers=10
+
+# ###### Minion provisioning ######
+# These settings control the automatic provisioning of minions.
+#
+# Enables the provisioning mechanism
+#opennms.minion.provisioning=true
+#
+# The pattern is used to name the foreign source used to provision the minions.
+# The pattern can contain a single '%s' placeholder which is replaced with the
+# minion's location.
+#opennms.minion.provisioning.foreignSourcePattern=Minions
+
+# ###### JMS Timeout ######
+# Various OpenNMS components communicate via a message queue. These messages require a request timeout value to
+# be set. In many cases OpenNMS computes a proper timeout value for its operations. However, if a value cannot be
+# determined, this default value (ms) will be used.
+#org.opennms.jms.timeout = 20000
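A hedged sketch of the new broker settings pointed at an external ActiveMQ instance instead of the embedded one; the host name and credentials are placeholders, not recommendations:

    org.opennms.activemq.broker.disable=true
    org.opennms.activemq.broker.url=tcp://broker.example.org:61616
    org.opennms.activemq.broker.username=opennms
    org.opennms.activemq.broker.password=secret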
diff --git a/org.apache.felix.fileinstall-deploy.cfg b/org.apache.felix.fileinstall-deploy.cfg
index b2aedac..7f9f0ca 100644
--- a/org.apache.felix.fileinstall-deploy.cfg
+++ b/org.apache.felix.fileinstall-deploy.cfg
@@ -22,4 +22,4 @@ felix.fileinstall.tmpdir = ${karaf.data}/generated-bundles
felix.fileinstall.poll = 1000
felix.fileinstall.start.level = 80
felix.fileinstall.active.level = 80
-felix.fileinstall.log.level = 3
\ No newline at end of file
+felix.fileinstall.log.level = 3
diff --git a/org.apache.karaf.command.acl.config.cfg b/org.apache.karaf.command.acl.config.cfg
index 542b15f..69a4705 100644
--- a/org.apache.karaf.command.acl.config.cfg
+++ b/org.apache.karaf.command.acl.config.cfg
@@ -42,4 +42,4 @@ propset = manager
propset[/.*jmx[.]acl.*/] = admin
propset[/.*org[.]apache[.]karaf[.]command[.]acl[.].+/] = admin
propset[/.*org[.]apache[.]karaf[.]service[.]acl[.].+/] = admin
-update = manager
\ No newline at end of file
+update = manager
diff --git a/org.apache.karaf.command.acl.features.cfg b/org.apache.karaf.command.acl.features.cfg
index dfe4483..175fdf9 100644
--- a/org.apache.karaf.command.acl.features.cfg
+++ b/org.apache.karaf.command.acl.features.cfg
@@ -24,4 +24,4 @@
# org.apache.karaf.command.acl.osgi.cfg
#
install = admin
-uninstall = admin
\ No newline at end of file
+uninstall = admin
diff --git a/org.apache.karaf.command.acl.jaas.cfg b/org.apache.karaf.command.acl.jaas.cfg
index 5ab39d7..5713dea 100644
--- a/org.apache.karaf.command.acl.jaas.cfg
+++ b/org.apache.karaf.command.acl.jaas.cfg
@@ -24,4 +24,4 @@
# org.apache.karaf.command.acl.osgi.cfg
#
# Jaas commands have no effect until update is called.
-update = admin
\ No newline at end of file
+update = admin
diff --git a/org.apache.karaf.command.acl.osgi.cfg b/org.apache.karaf.command.acl.osgi.cfg
index bbb2cdd..7852cf8 100644
--- a/org.apache.karaf.command.acl.osgi.cfg
+++ b/org.apache.karaf.command.acl.osgi.cfg
@@ -67,4 +67,4 @@ watch = admin
shutdown = admin
start-level[/.*[0-9][0-9][0-9]+.*/] = manager # manager can set startlevels above 100
start-level[/[^0-9]*/] = viewer # viewer can obtain the current start level
-start-level = admin # admin can set any start level, including < 100
\ No newline at end of file
+start-level = admin # admin can set any start level, including < 100
diff --git a/org.apache.karaf.command.acl.scope_bundle.cfg b/org.apache.karaf.command.acl.scope_bundle.cfg
index 588e3f9..5e2621f 100644
--- a/org.apache.karaf.command.acl.scope_bundle.cfg
+++ b/org.apache.karaf.command.acl.scope_bundle.cfg
@@ -31,4 +31,4 @@ log=org.apache.karaf.shell.log
packages=org.apache.karaf.shell.packages
config=org.apache.karaf.shell.config
ssh=org.apache.karaf.shell.ssh
-shell=org.apache.karaf.shell.commands
\ No newline at end of file
+shell=org.apache.karaf.shell.commands
diff --git a/org.apache.karaf.command.acl.shell.cfg b/org.apache.karaf.command.acl.shell.cfg
index 1f94e28..c47f6f5 100644
--- a/org.apache.karaf.command.acl.shell.cfg
+++ b/org.apache.karaf.command.acl.shell.cfg
@@ -26,4 +26,4 @@
edit = admin
exec = admin
new = admin
-java = admin
\ No newline at end of file
+java = admin
diff --git a/org.apache.karaf.features.cfg b/org.apache.karaf.features.cfg
index 733563b..cd42b32 100644
--- a/org.apache.karaf.features.cfg
+++ b/org.apache.karaf.features.cfg
@@ -1,7 +1,7 @@
#
# Comma separated list of features repositories to register by default
#
-featuresRepositories=mvn:org.opennms.container/karaf/18.0.4/xml/features,mvn:org.opennms.karaf/opennms/18.0.4/xml/features
+featuresRepositories=mvn:org.opennms.container/org.opennms.container.karaf/19.0.0/xml/features,mvn:org.opennms.karaf/opennms/19.0.0/xml/features
#
# Comma separated list of features to install at startup
@@ -15,12 +15,17 @@ featuresBoot=karaf-framework,ssh,config,features,management,\
opennms-jaas-login-module,\
datachoices, \
opennms-collection-commands, \
+ opennms-events-commands, \
+ opennms-icmp-commands, \
+ opennms-snmp-commands, \
opennms-topology-runtime-browsers,\
opennms-topology-runtime-linkd,\
- opennms-topology-runtime-simple,\
opennms-topology-runtime-vmware,\
opennms-topology-runtime-application,\
opennms-topology-runtime-bsm,\
+ opennms-provisioning-shell,\
+ opennms-poller-shell,\
+ opennms-topology-runtime-graphml,\
osgi-nrtg-local,\
vaadin-node-maps,\
vaadin-snmp-events-and-metrics, \
@@ -42,4 +47,5 @@ featuresBoot=karaf-framework,ssh,config,features,management,\
vaadin-opennms-pluginmanager, \
vaadin-adminpage, \
org.opennms.features.bsm.shell-commands, \
- internal-plugins-descriptor
+ internal-plugins-descriptor, \
+ geolocation
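featuresBoot is a single comma-separated list with backslash continuations, so adding a feature means extending the tail of the list; a sketch (my-extra-feature is a placeholder, not a shipped feature):

    org.opennms.features.bsm.shell-commands, \
    internal-plugins-descriptor, \
    geolocation, \
    my-extra-feature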
diff --git a/org.apache.karaf.features.obr.cfg b/org.apache.karaf.features.obr.cfg
index 12ba4cd..7d47c7d 100644
--- a/org.apache.karaf.features.obr.cfg
+++ b/org.apache.karaf.features.obr.cfg
@@ -35,4 +35,4 @@ startByDefault = true
#
# Defines the start level for resolved bundles. The default is 80.
#
-startLevel = 80
\ No newline at end of file
+startLevel = 80
diff --git a/org.apache.karaf.features.repos.cfg b/org.apache.karaf.features.repos.cfg
index a8cc7d4..46fa539 100644
--- a/org.apache.karaf.features.repos.cfg
+++ b/org.apache.karaf.features.repos.cfg
@@ -20,8 +20,10 @@
#
# This file describes the features repository URL for some projects
#
-cellar=mvn:org.apache.karaf.cellar/apache-karaf-cellar/[2,3)/xml/features
-cave=mvn:org.apache.karaf.cave/apache-karaf-cave/[2,3)/xml/features
+enterprise=mvn:org.apache.karaf.assemblies.features/enterprise/LATEST/xml/features
+spring=mvn:org.apache.karaf.assemblies.features/spring/LATEST/xml/features
+cellar=mvn:org.apache.karaf.cellar/apache-karaf-cellar/LATEST/xml/features
+cave=mvn:org.apache.karaf.cave/apache-karaf-cave/LATEST/xml/features
camel=mvn:org.apache.camel.karaf/apache-camel/LATEST/xml/features
camel-extras=mvn:org.apache-extras.camel-extra.karaf/camel-extra/LATEST/xml/features
cxf=mvn:org.apache.cxf.karaf/apache-cxf/LATEST/xml/features
@@ -30,4 +32,11 @@ activemq=mvn:org.apache.activemq/activemq-karaf/LATEST/xml/features
jclouds=mvn:org.apache.jclouds.karaf/jclouds-karaf/LATEST/xml/features
openejb=mvn:org.apache.openejb/openejb-feature/LATEST/xml/features
wicket=mvn:org.ops4j.pax.wicket/features/LATEST/xml/features
-hawtio=mvn:io.hawt/hawtio-karaf/LATEST/xml/features
\ No newline at end of file
+hawtio=mvn:io.hawt/hawtio-karaf/LATEST/xml/features
+pax-cdi=mvn:org.ops4j.pax.cdi/pax-cdi-features/LATEST/xml/features
+pax-jdbc=mvn:org.ops4j.pax.jdbc/pax-jdbc-features/LATEST/xml/features
+pax-jpa=mvn:org.ops4j.pax.jpa/pax-jpa-features/LATEST/xml/features
+pax-web=mvn:org.ops4j.pax.web/pax-web-features/LATEST/xml/features
+pax-wicket=mvn:org.ops4j.pax.wicket/pax-wicket-features/LATEST/xml/features
+ecf=http://download.eclipse.org/rt/ecf/latest/site.p2/karaf-features.xml
+decanter=mvn:org.apache.karaf.decanter/apache-karaf-decanter/LATEST/xml/features
diff --git a/org.apache.karaf.jaas.cfg b/org.apache.karaf.jaas.cfg
index 8ac5af6..19f3fa0 100644
--- a/org.apache.karaf.jaas.cfg
+++ b/org.apache.karaf.jaas.cfg
@@ -58,4 +58,4 @@ encryption.algorithm = MD5
# hexadecimal
# base64
#
-encryption.encoding = hexadecimal
\ No newline at end of file
+encryption.encoding = hexadecimal
diff --git a/org.apache.karaf.kar.cfg b/org.apache.karaf.kar.cfg
index 6c7cf16..0b71af2 100644
--- a/org.apache.karaf.kar.cfg
+++ b/org.apache.karaf.kar.cfg
@@ -21,4 +21,4 @@
# Enable or disable the refresh of the bundles when installing
# the features contained in a KAR file
#
-noAutoRefreshBundles=false
\ No newline at end of file
+noAutoRefreshBundles=false
diff --git a/org.apache.karaf.log.cfg b/org.apache.karaf.log.cfg
index 20ceafc..340f572 100644
--- a/org.apache.karaf.log.cfg
+++ b/org.apache.karaf.log.cfg
@@ -33,4 +33,4 @@ size = 500
# The pattern used to format the log statement when using log:display. This pattern follows
# the log4j layout. You can override this parameter at runtime using log:display with -p.
#
-pattern = %d{ISO8601} | %-5.5p | %-16.16t | %-32.32c{1} | %-32.32C %4L | %X{bundle.id} - %X{bundle.name} - %X{bundle.version} | %m%n
\ No newline at end of file
+pattern = %d{ISO8601} | %-5.5p | %-16.16t | %-32.32c{1} | %X{bundle.id} - %X{bundle.name} - %X{bundle.version} | %m%n
diff --git a/org.apache.karaf.shell.cfg b/org.apache.karaf.shell.cfg
index 5798ad9..03d2e07 100644
--- a/org.apache.karaf.shell.cfg
+++ b/org.apache.karaf.shell.cfg
@@ -24,39 +24,38 @@
#
# Via sshPort and sshHost you define the address at which you can log in to Karaf.
#
-sshPort=8101
-sshHost=0.0.0.0
+sshPort = 8101
+sshHost = 127.0.0.1
#
# The sshIdleTimeout defines the inactivity timeout to log out the SSH session.
# The sshIdleTimeout is in milliseconds, and the default is set to 30 minutes.
#
-sshIdleTimeout=1800000
+sshIdleTimeout = 1800000
#
# sshRealm defines which JAAS domain to use for password authentication.
#
-sshRealm=karaf
+sshRealm = karaf
#
# The location of the hostKey file defines where the private/public key of the server
# is located. If no file is at the defined location it will be ignored.
#
-hostKey=${karaf.base}/etc/host.key
-
-#
-# Role name used for SSH access authorization
-# If not set, this defaults to the ${karaf.admin.role} configured in etc/system.properties
-#
-# sshRole=admin
+hostKey = ${karaf.etc}/host.key
#
# Self defined key size in 1024, 2048, 3072, or 4096
# If not set, this defaults to 1024.
#
-# keySize=1024
+# keySize = 1024
#
# Specify host key algorithm, defaults to DSA
#
-# algorithm=DSA
+# algorithm = DSA
+
+# Specify an additional welcome banner to be displayed when a user logs into the server.
+#
+# welcomeBanner =
+
diff --git a/org.opennms.features.geocoder.google.cfg b/org.opennms.features.geocoder.google.cfg
index 84b564b..6f59a70 100644
--- a/org.opennms.features.geocoder.google.cfg
+++ b/org.opennms.features.geocoder.google.cfg
@@ -1,2 +1,3 @@
clientId=
clientKey=
+timeout=500
diff --git a/org.opennms.features.topology.app.cfg b/org.opennms.features.topology.app.cfg
index 3467e11..9f020a1 100644
--- a/org.opennms.features.topology.app.cfg
+++ b/org.opennms.features.topology.app.cfg
@@ -1,7 +1,5 @@
-servletAlias = /topology
org.apache.karaf.features.configKey = org.opennms.features.topology.app
-theme = topo_default
-widgetset = org.opennms.features.topology.widgetset.gwt.TopologyWidgetset
showHeader = true
autoRefresh.enabled = false
autoRefresh.interval = 60
+resolveCoordinatesFromAddressString = true
diff --git a/org.opennms.features.topology.app.icons.application.cfg b/org.opennms.features.topology.app.icons.application.cfg
index 1dec86b..34a4bf1 100644
--- a/org.opennms.features.topology.app.icons.application.cfg
+++ b/org.opennms.features.topology.app.icons.application.cfg
@@ -1,3 +1,3 @@
# Application Topology
application.application = business_service
-application.monitored-service = IP_service
\ No newline at end of file
+application.monitored-service = IP_service
diff --git a/org.opennms.features.topology.app.icons.bsm.cfg b/org.opennms.features.topology.app.icons.bsm.cfg
index 8c948d6..abcd2af 100644
--- a/org.opennms.features.topology.app.icons.bsm.cfg
+++ b/org.opennms.features.topology.app.icons.bsm.cfg
@@ -1,4 +1,4 @@
# Business Service Topology
bsm.business-service = business_service
bsm.ip-service = IP_service
-bsm.reduction-key = reduction_key
\ No newline at end of file
+bsm.reduction-key = reduction_key
diff --git a/org.opennms.features.topology.app.icons.list b/org.opennms.features.topology.app.icons.list
index 4fb412c..4344248 100644
--- a/org.opennms.features.topology.app.icons.list
+++ b/org.opennms.features.topology.app.icons.list
@@ -13,24 +13,36 @@ linux_file_server
opennms_server
cloud
+# atlas icons (since 19.0.0)
+microwave_backhaul_1
+microwave_backhaul_2
+region_1
+region_2
+market_1
+market_2
+site_1
+site_2
+site_3
+
# BSM Icons
IP_service
business_service
reduction_key
# vmware_icons
+vmware-datacenter
vmware-cluster
-vmware-datastore
+vmware-hostsystem-on
+vmware-hostsystem-off
+vmware-hostsystem-standby
vmware-hostsystem-unknown
-vmware-virtualmachine-off
+vmware-network
+vmware-datastore
vmware-virtualmachine-on
-vmware-hostsystem-off
+vmware-virtualmachine-off
vmware-virtualmachine-suspended
-vmware-hostsystem-standby
-vmware-hostsystem-on
vmware-virtualmachine-unknown
-vmware-datacenter
-vmware-network
+
# default_icons (legacy, before 18.0.0)
legacy_linux_file_server
diff --git a/org.opennms.features.topology.app.icons.sfree.cfg b/org.opennms.features.topology.app.icons.sfree.cfg
index fa8f401..d3718d9 100644
--- a/org.opennms.features.topology.app.icons.sfree.cfg
+++ b/org.opennms.features.topology.app.icons.sfree.cfg
@@ -1,3 +1,3 @@
# Scale Free Topology
sfree.group = cloud
-sfree.system = vmware-network
\ No newline at end of file
+sfree.system = vmware-network
diff --git a/org.opennms.features.topology.app.icons.vmware.cfg b/org.opennms.features.topology.app.icons.vmware.cfg
index 7a7a977..90e5d9e 100644
--- a/org.opennms.features.topology.app.icons.vmware.cfg
+++ b/org.opennms.features.topology.app.icons.vmware.cfg
@@ -9,4 +9,4 @@ vmware.VIRTUALMACHINE_ICON_OFF = vmware-virtualmachine-off
vmware.VIRTUALMACHINE_ICON_SUSPENDED = vmware-virtualmachine-suspended
vmware.VIRTUALMACHINE_ICON_UNKNOWN = vmware-virtualmachine-unknown
vmware.DATASTORE_ICON = vmware-datastore
-vmware.DATACENTER_ICON = vmware-datacenter
\ No newline at end of file
+vmware.DATACENTER_ICON = vmware-datacenter
diff --git a/org.ops4j.pax.logging.cfg b/org.ops4j.pax.logging.cfg
index b9acb61..daf8961 100644
--- a/org.ops4j.pax.logging.cfg
+++ b/org.ops4j.pax.logging.cfg
@@ -21,14 +21,14 @@
log4j.rootLogger=INFO, out, osgi:*
log4j.throwableRenderer=org.apache.log4j.OsgiThrowableRenderer
-# To avoid flooding the log when using WARN level on an ssh connection and doing log:tail
+# To avoid flooding the log when using DEBUG level on an ssh connection and doing log:tail
log4j.logger.org.apache.sshd.server.channel.ChannelSession = INFO
# Without this, a TON of topology logs go to output.log
log4j.logger.org.opennms.features.topology = WARN
-# Display all WARN logs for our code
-log4j.category.org.opennms=WARN, out, osgi:*
+# Display all DEBUG logs for our code
+log4j.category.org.opennms=DEBUG, out, osgi:*
log4j.additivity.org.opennms=false
# CONSOLE appender not used by default
@@ -40,7 +40,7 @@ log4j.appender.stdout.layout.ConversionPattern=%d %-5p %X{bundle.name}:%X{bundle
log4j.appender.out=org.apache.log4j.RollingFileAppender
log4j.appender.out.layout=org.apache.log4j.PatternLayout
log4j.appender.out.layout.ConversionPattern=%d %-5p %X{bundle.name}:%X{bundle.version}(%X{bundle.id}) [%t] %c: %m%n
-log4j.appender.out.file=${karaf.data}/log/karaf.log
+log4j.appender.out.file=${karaf.base}/logs/karaf.log
log4j.appender.out.append=true
log4j.appender.out.maxFileSize=1MB
log4j.appender.out.maxBackupIndex=10
@@ -52,6 +52,6 @@ log4j.appender.sift.default=karaf
log4j.appender.sift.appender=org.apache.log4j.FileAppender
log4j.appender.sift.appender.layout=org.apache.log4j.PatternLayout
log4j.appender.sift.appender.layout.ConversionPattern=%d %-5p [%t] %c: %m%n
-log4j.appender.sift.appender.file=${karaf.data}/log/$\\{bundle.name\\}.log
+log4j.appender.sift.appender.file=${karaf.base}/logs/$\\{bundle.name\\}.log
log4j.appender.sift.appender.append=true
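Because 19.0.0 raises org.opennms to DEBUG in the Karaf container, a sketch of restoring the quieter 18.x behaviour is to set the category back in this file:

    log4j.category.org.opennms=WARN, out, osgi:*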
diff --git a/org.ops4j.pax.url.mvn.cfg b/org.ops4j.pax.url.mvn.cfg
index 5b431c4..c92ecac 100644
--- a/org.ops4j.pax.url.mvn.cfg
+++ b/org.ops4j.pax.url.mvn.cfg
@@ -101,3 +101,4 @@ org.ops4j.pax.url.mvn.repositories= \
http://repository.springsource.com/maven/bundles/release@id=springsource.release, \
http://repository.springsource.com/maven/bundles/external@id=springsource.external, \
https://oss.sonatype.org/content/repositories/releases/@id=sonatype
+
diff --git a/pluginManifestData.xml b/pluginManifestData.xml
new file mode 100644
index 0000000..0328a51
--- /dev/null
+++ b/pluginManifestData.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<pluginManagerData>
+ <pluginServerPassword>admin</pluginServerPassword>
+ <pluginServerUsername>admin</pluginServerUsername>
+ <pluginServerUrl>http://localhost:8980/opennms</pluginServerUrl>
+ <licenceShoppingCartUrl>http://opennms.org</licenceShoppingCartUrl>
+ <karafDataMap/>
+ <karafManifestEntryMap/>
+</pluginManagerData>
diff --git a/poller-config.properties b/poller-config.properties
deleted file mode 100644
index 7f3b4b3..0000000
--- a/poller-config.properties
+++ /dev/null
@@ -1,75 +0,0 @@
-services=SMTP,FTP,IMAP,POP3,TCP,HTTP,HTTPS,NTP
-interval=300000
-timeout=3000
-banner=*
-
-# JBoss
-service.JBoss4.monitor=org.opennms.netmgt.poller.monitors.JBoss4Monitor
-service. JBoss4.capsd-class=org.opennms.netmgt.capsd.plugins.JBoss4Plugin
-service. JBoss4.protocol=JBoss4
-service. JBoss4.port=1099
-
-service.JBoss32.monitor=org.opennms.netmgt.poller.monitors.JBoss32Monitor
-service. JBoss32.capsd-class=org.opennms.netmgt.capsd.plugins.JBoss32Plugin
-service. JBoss32.protocol=JBoss32
-service. JBoss32.port=1099
-
-# SMTP
-service.SMTP.monitor=org.opennms.netmgt.poller.monitors.SmtpMonitor
-service.SMTP.capsd-class=org.opennms.netmgt.capsd.plugins.SmtpPlugin
-service.SMTP.protocol=SMTP
-service.SMTP.port=25
-
-# FTP
-service.FTP.monitor=org.opennms.netmgt.poller.monitors.FtpMonitor
-service.FTP.capsd-class=org.opennms.netmgt.capsd.plugins.FtpPlugin
-service.FTP.protocol=FTP
-service.FTP.port=21
-
-# Postgres
-service.Postgres.monitor=org.opennms.netmgt.poller.monitors.TcpMonitor
-service.Postgres.capsd-class=org.opennms.netmgt.capsd.plugins.TcpPlugin
-service.Postgres.protocol=Postgres
-service.Postgres.port=5432
-
-# MySQL
-#service.MySQL.monitor=org.opennms.netmgt.poller.monitors.TcpMonitor
-#service.MySQL.capsd-class=org.opennms.netmgt.capsd.plugins.TcpPlugin
-#service.MySQL.protocol=MySQL
-#service.MySQL.port=
-
-# IMAP
-service.IMAP.monitor=org.opennms.netmgt.poller.monitors.ImapMonitor
-service.IMAP.capsd-class=org.opennms.netmgt.capsd.plugins.ImapPlugin
-service.IMAP.protocol=IMAP
-service.IMAP.port=143
-
-# POP3
-service.POP3.monitor=org.opennms.netmgt.poller.monitors.Pop3Montior
-service.POP3.capsd-class=org.opennms.netmgt.capsd.plugins.Pop3Plugin
-service.POP3.protocol=POP3
-service.POP3.port=110
-
-# TCP
-service.TCP.monitor=org.opennms.netmgt.poller.monitors.TcpMonitor
-service.TCP.capsd-class=org.opennms.netmgt.capsd.plugins.TcpPlugin
-service.TCP.protocol=TCP
-service.TCP.port=
-
-# HTTP
-service.HTTP.monitor=org.opennms.netmgt.poller.monitors.HttpMonitor
-service.HTTP.capsd-class=org.opennms.netmgt.capsd.plugins.HttpPlugin
-service.HTTP.protocol=HTTP
-service.HTTP.port=80:8080:8088
-
-# HTTPS
-service.HTTPS.monitor=org.opennms.netmgt.poller.monitors.HttpsMonitor
-service.HTTPS.capsd-class=org.opennms.netmgt.capsd.plugins.HttpsPlugin
-service.HTTPS.protocol=HTTPS
-service.HTTPS.port=
-
-# NTP
-service.NTP.monitor=org.opennms.netmgt.poller.monitors.NtpMonitor
-service.NTP.capsd-class=org.opennms.netmgt.capsd.plugins.NtpPlugin
-service.NTP.protocol=NTP
-service.NTP.port=123
diff --git a/poller-configuration.xml b/poller-configuration.xml
index 6e7a452..47d8dbf 100644
--- a/poller-configuration.xml
+++ b/poller-configuration.xml
@@ -8,8 +8,8 @@
<critical-service name="ICMP" />
</node-outage>
- <package name="cassandra21x">
- <filter><![CDATA[(IPADDR != '0.0.0.0') & (categoryName == 'Cassandra21x')]]></filter>
+ <package name="cassandra-via-jmx">
+ <filter>IPADDR != '0.0.0.0'</filter>
<rrd step="300">
<rra>RRA:AVERAGE:0.5:1:2016</rra>
<rra>RRA:AVERAGE:0.5:12:1488</rra>
@@ -23,9 +23,8 @@
<parameter key="timeout" value="3000"/>
<parameter key="protocol" value="rmi"/>
<parameter key="urlPath" value="/jmxrmi"/>
- <parameter key="ds-name" value="cassandra21x"/>
- <parameter key="friendly-name" value="cassandra21x"/>
- <parameter key="collection" value="cassandra21x"/>
+ <parameter key="rrd-base-name" value="jmx-cassandra"/>
+ <parameter key="ds-name" value="jmx-cassandra"/>
<parameter key="thresholding-enabled" value="true"/>
<parameter key="factory" value="PASSWORD-CLEAR"/>
<parameter key="username" value="cassandra-username"/>
@@ -36,31 +35,14 @@
<parameter key="tests.joined" value="storage.Joined"/>
<parameter key="tests.unreachables" value="empty(storage.UnreachableNodes)"/>
</service>
- <downtime interval="30000" begin="0" end="300000" /><!-- 30s, 0, 5m -->
- <downtime interval="300000" begin="300000" end="43200000" /><!-- 5m, 5m, 12h -->
- <downtime interval="600000" begin="43200000" end="432000000" /><!-- 10m, 12h, 5d -->
- <downtime begin="432000000" delete="true" /><!-- anything after 5 days delete -->
- </package>
-
- <package name="cassandra21x-newts">
- <filter><![CDATA[(IPADDR != '0.0.0.0') & (catincCassandra21x & catincNewts)]]></filter>
- <rrd step="300">
- <rra>RRA:AVERAGE:0.5:1:2016</rra>
- <rra>RRA:AVERAGE:0.5:12:1488</rra>
- <rra>RRA:AVERAGE:0.5:288:366</rra>
- <rra>RRA:MAX:0.5:288:366</rra>
- <rra>RRA:MIN:0.5:288:366</rra>
- </rrd>
<service name="JMX-Cassandra-Newts" interval="300000" user-defined="false" status="on">
<parameter key="port" value="7199"/>
<parameter key="retry" value="2"/>
<parameter key="timeout" value="3000"/>
<parameter key="protocol" value="rmi"/>
<parameter key="urlPath" value="/jmxrmi"/>
- <parameter key="rrd-base-name" value="cassandra21x-newts"/>
- <parameter key="ds-name" value="cassandra21x-newts"/>
- <parameter key="friendly-name" value="cassandra21x-newts"/>
- <parameter key="collection" value="cassandra21x-newts"/>
+ <parameter key="rrd-base-name" value="jmx-cassandra-newts"/>
+ <parameter key="ds-name" value="jmx-cassandra-newts"/>
<parameter key="thresholding-enabled" value="true"/>
<parameter key="factory" value="PASSWORD-CLEAR"/>
<parameter key="username" value="cassandra-username"/>
@@ -109,6 +91,14 @@
<parameter key="rrd-base-name" value="dns" />
<parameter key="ds-name" value="dns" />
</service>
+ <service name="Elasticsearch" interval="300000" user-defined="false" status="on">
+ <parameter key="retry" value="1"/>
+ <parameter key="timeout" value="3000"/>
+ <parameter key="port" value="9200"/>
+ <parameter key="url" value="/_cluster/stats"/>
+ <parameter key="response" value="200-202,299"/>
+ <parameter key="response-text" value="~.*status.:.green.*"/>
+ </service>
<service name="SMTP" interval="300000" user-defined="false" status="on">
<parameter key="retry" value="1" />
<parameter key="timeout" value="3000" />
@@ -258,6 +248,25 @@
<parameter key="timeout" value="3000"/>
<parameter key="rrd-repository" value="/opt/opennms/share/rrd/response" />
</service>
+ <service name="JMX-Minion" interval="300000" user-defined="false" status="on">
+ <parameter key="port" value="1299"/>
+ <parameter key="retry" value="2"/>
+ <parameter key="timeout" value="3000"/>
+ <parameter key="urlPath" value="/karaf-minion"/>
+ <parameter key="factory" value="PASSWORD-CLEAR"/>
+ <parameter key="username" value="admin"/>
+ <parameter key="password" value="admin"/>
+ <parameter key="rrd-repository" value="/opt/opennms/share/rrd/response" />
+ </service>
+ <service name="JMX-Kafka" interval="300000" user-defined="false" status="on">
+ <parameter key="port" value="9999"/>
+ <parameter key="retry" value="2"/>
+ <parameter key="timeout" value="3000"/>
+ <parameter key="factory" value="PASSWORD-CLEAR"/>
+ <parameter key="username" value="admin"/>
+ <parameter key="password" value="admin"/>
+ <parameter key="rrd-repository" value="/opt/opennms/share/rrd/response" />
+ </service>
<service name="VMwareCim-HostSystem" interval="300000" user-defined="false" status="on">
<parameter key="retry" value="2"/>
<parameter key="timeout" value="3000"/>
@@ -274,6 +283,9 @@
<parameter key="port" value="3389" />
<parameter key="timeout" value="3000" />
</service>
+ <service name="Minion-Heartbeat" interval="30000" user-defined="false" status="on">
+ <parameter key="period" value="30000" /> <!-- Service interval should be same as period -->
+ </service>
<downtime interval="30000" begin="0" end="300000" /><!-- 30s, 0, 5m -->
<downtime interval="300000" begin="300000" end="43200000" /><!-- 5m, 5m, 12h -->
@@ -324,10 +336,12 @@
<monitor service="HypericHQ" class-name="org.opennms.netmgt.poller.monitors.PageSequenceMonitor" />
<monitor service="SMTP" class-name="org.opennms.netmgt.poller.monitors.SmtpMonitor" />
<monitor service="DNS" class-name="org.opennms.netmgt.poller.monitors.DnsMonitor" />
+ <monitor service="Elasticsearch" class-name="org.opennms.netmgt.poller.monitors.HttpMonitor" />
<monitor service="FTP" class-name="org.opennms.netmgt.poller.monitors.FtpMonitor" />
<monitor service="SNMP" class-name="org.opennms.netmgt.poller.monitors.SnmpMonitor" />
<monitor service="Oracle" class-name="org.opennms.netmgt.poller.monitors.TcpMonitor" />
<monitor service="Postgres" class-name="org.opennms.netmgt.poller.monitors.TcpMonitor" />
+ <monitor service="Minion-Heartbeat" class-name="org.opennms.netmgt.poller.monitors.MinionHeartbeatMonitor" />
<monitor service="MySQL" class-name="org.opennms.netmgt.poller.monitors.TcpMonitor" />
<monitor service="SQLServer" class-name="org.opennms.netmgt.poller.monitors.TcpMonitor" />
<monitor service="SSH" class-name="org.opennms.netmgt.poller.monitors.SshMonitor" />
@@ -337,6 +351,8 @@
<monitor service="NRPE-NoSSL" class-name="org.opennms.netmgt.poller.monitors.NrpeMonitor" />
<monitor service="Windows-Task-Scheduler" class-name="org.opennms.netmgt.poller.monitors.Win32ServiceMonitor" />
<monitor service="OpenNMS-JVM" class-name="org.opennms.netmgt.poller.monitors.Jsr160Monitor" />
+ <monitor service="JMX-Minion" class-name="org.opennms.netmgt.poller.monitors.Jsr160Monitor" />
+ <monitor service="JMX-Kafka" class-name="org.opennms.netmgt.poller.monitors.Jsr160Monitor" />
<monitor service="VMwareCim-HostSystem" class-name="org.opennms.netmgt.poller.monitors.VmwareCimMonitor"/>
<monitor service="VMware-ManagedEntity" class-name="org.opennms.netmgt.poller.monitors.VmwareMonitor"/>
<monitor service="MS-RDP" class-name="org.opennms.netmgt.poller.monitors.TcpMonitor" />
diff --git a/report-templates/AssetManagementMaintExpired.jasper b/report-templates/AssetManagementMaintExpired.jasper
index 3ed64ae..00ef70a 100644
Binary files a/report-templates/AssetManagementMaintExpired.jasper and b/report-templates/AssetManagementMaintExpired.jasper differ
diff --git a/report-templates/AssetManagementMaintStrategy.jasper b/report-templates/AssetManagementMaintStrategy.jasper
index 8c1094a..cdfa4e0 100644
Binary files a/report-templates/AssetManagementMaintStrategy.jasper and b/report-templates/AssetManagementMaintStrategy.jasper differ
diff --git a/report-templates/AvailabilitySummary.jasper b/report-templates/AvailabilitySummary.jasper
index abc12a5..50b024b 100644
Binary files a/report-templates/AvailabilitySummary.jasper and b/report-templates/AvailabilitySummary.jasper differ
diff --git a/report-templates/AveragePeakTrafficRates.jasper b/report-templates/AveragePeakTrafficRates.jasper
index 561aa8e..1262858 100644
Binary files a/report-templates/AveragePeakTrafficRates.jasper and b/report-templates/AveragePeakTrafficRates.jasper differ
diff --git a/report-templates/DiskUsageForCTX.jasper b/report-templates/DiskUsageForCTX.jasper
index d9fbcbe..1024ad1 100644
Binary files a/report-templates/DiskUsageForCTX.jasper and b/report-templates/DiskUsageForCTX.jasper differ
diff --git a/report-templates/Early-Morning-Report.jasper b/report-templates/Early-Morning-Report.jasper
index 30983a8..0148fbb 100644
Binary files a/report-templates/Early-Morning-Report.jasper and b/report-templates/Early-Morning-Report.jasper differ
diff --git a/report-templates/EventAnalysis.jasper b/report-templates/EventAnalysis.jasper
index ef016f6..888cfad 100644
Binary files a/report-templates/EventAnalysis.jasper and b/report-templates/EventAnalysis.jasper differ
diff --git a/report-templates/InterfaceAvailabilityReport.jasper b/report-templates/InterfaceAvailabilityReport.jasper
index ad4857f..12b02d3 100644
Binary files a/report-templates/InterfaceAvailabilityReport.jasper and b/report-templates/InterfaceAvailabilityReport.jasper differ
diff --git a/report-templates/NodeAvailabilityReport.jasper b/report-templates/NodeAvailabilityReport.jasper
index ed762f3..e8c2f07 100644
Binary files a/report-templates/NodeAvailabilityReport.jasper and b/report-templates/NodeAvailabilityReport.jasper differ
diff --git a/report-templates/ResponseTime.jasper b/report-templates/ResponseTime.jasper
index d70d0fb..d1be959 100644
Binary files a/report-templates/ResponseTime.jasper and b/report-templates/ResponseTime.jasper differ
diff --git a/report-templates/ResponseTimeCharts.jasper b/report-templates/ResponseTimeCharts.jasper
index 0e558ed..3a8f627 100644
Binary files a/report-templates/ResponseTimeCharts.jasper and b/report-templates/ResponseTimeCharts.jasper differ
diff --git a/report-templates/ResponseTimeSummary.jasper b/report-templates/ResponseTimeSummary.jasper
index 16afc5c..c85473d 100644
Binary files a/report-templates/ResponseTimeSummary.jasper and b/report-templates/ResponseTimeSummary.jasper differ
diff --git a/report-templates/SerialInterfaceUtilizationSummary.jasper b/report-templates/SerialInterfaceUtilizationSummary.jasper
index 78a47c1..1dfa18d 100644
Binary files a/report-templates/SerialInterfaceUtilizationSummary.jasper and b/report-templates/SerialInterfaceUtilizationSummary.jasper differ
diff --git a/report-templates/SnmpInterfaceOperAvailabilityReport.jasper b/report-templates/SnmpInterfaceOperAvailabilityReport.jasper
index 2172fb3..81759a9 100644
Binary files a/report-templates/SnmpInterfaceOperAvailabilityReport.jasper and b/report-templates/SnmpInterfaceOperAvailabilityReport.jasper differ
diff --git a/report-templates/TopIOWait.jasper b/report-templates/TopIOWait.jasper
new file mode 100644
index 0000000..f7a0f66
Binary files /dev/null and b/report-templates/TopIOWait.jasper differ
diff --git a/report-templates/TopIOWait.jrxml b/report-templates/TopIOWait.jrxml
new file mode 100644
index 0000000..71b194b
--- /dev/null
+++ b/report-templates/TopIOWait.jrxml
@@ -0,0 +1,271 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Created with Jaspersoft Studio version 6.1.1.final using JasperReports Library version 6.1.1 -->
+<!-- 2016-09-14T12:01:14 -->
+<jasperReport xmlns="http://jasperreports.sourceforge.net/jasperreports" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://jasperreports.sourceforge.net/jasperreports http://jasperreports.sourceforge.net/xsd/jasperreport.xsd" name="Average and Peak Traffic Rates" pageWidth="595" pageHeight="842" whenNoDataType="NoDataSection" columnWidth="555" leftMargin="20" rightMargin="20" topMargin="20" bottomMargin="20" uuid="dd0c0584-c0b6-4860-a6fd-62182c4d11a5">
+ <property name="ireport.zoom" value="1.2100000000000006"/>
+ <property name="ireport.x" value="0"/>
+ <property name="ireport.y" value="0"/>
+ <property name="com.jaspersoft.studio.unit." value="pixel"/>
+ <property name="com.jaspersoft.studio.data.sql.tables" value=""/>
+ <property name="com.jaspersoft.studio.data.defaultdataadapter" value="opennms"/>
+ <template><![CDATA[$P{ONMS_REPORT_DIR} + "/assets/styles/defaultStyles.jrtx"]]></template>
+ <style name="Report_Title" forecolor="#000000" fontSize="20"/>
+ <style name="Report_Subtitle" forecolor="#000000" vTextAlign="Middle" vImageAlign="Middle" fontSize="10" isBold="false" isItalic="true" isUnderline="false" isStrikeThrough="false"/>
+ <style name="Table_Detail" hTextAlign="Left" hImageAlign="Left" vTextAlign="Middle" vImageAlign="Middle" fontName="SansSerif" fontSize="10" isBold="false" isItalic="false" isUnderline="false" isStrikeThrough="false"/>
+ <style name="Table_Grid" mode="Transparent" forecolor="#FFFFFF" isBold="false" isItalic="false" isUnderline="false" isStrikeThrough="false">
+ <pen lineWidth="0.0" lineColor="#FFFFFF"/>
+ <box>
+ <bottomPen lineWidth="1.0"/>
+ </box>
+ <conditionalStyle>
+ <conditionExpression><![CDATA[new Boolean($V{style_helper_COUNT}%new Integer("5") == new Integer("0"))]]></conditionExpression>
+ <style mode="Opaque" forecolor="#999999">
+ <box>
+ <bottomPen lineWidth="1.0"/>
+ </box>
+ </style>
+ </conditionalStyle>
+ </style>
+ <style name="Page_Footer" fontSize="10" isBold="false" isItalic="false" isUnderline="false" isStrikeThrough="false"/>
+ <style name="Table_Header" isBold="true" isItalic="false" isUnderline="false" isStrikeThrough="false"/>
+ <style name="Table_Header_BG" mode="Opaque" backcolor="#CCFFCC"/>
+ <style name="Table_Surveillance_Cat" forecolor="#000000" vTextAlign="Middle" vImageAlign="Middle" fontSize="10" isBold="true" isItalic="false" isUnderline="false" isStrikeThrough="false"/>
+ <style name="Table_Surveillance_Cat_BG" mode="Opaque" backcolor="#DFDFDF"/>
+ <style name="Table_Surveillance_Cat_Footer_BG" mode="Transparent" backcolor="#FFFFFF">
+ <pen lineWidth="1.0" lineStyle="Double"/>
+ </style>
+ <style name="Table_Surveillance_Cat_Footer" hTextAlign="Center" hImageAlign="Center" vTextAlign="Middle" vImageAlign="Middle" fontSize="10" isBold="true" isItalic="false" isUnderline="false" isStrikeThrough="false">
+ <box>
+ <bottomPen lineWidth="0.0" lineStyle="Double" lineColor="#000000"/>
+ </box>
+ </style>
+ <style name="Table_Surveillance_Cat_Footer_Line" hTextAlign="Center" hImageAlign="Center" vTextAlign="Middle" vImageAlign="Middle" isBold="true" isItalic="false" isUnderline="false" isStrikeThrough="false">
+ <box>
+ <bottomPen lineWidth="2.0" lineStyle="Double" lineColor="#000000"/>
+ </box>
+ </style>
+ <style name="Surveillance_Category_Group" mode="Opaque" backcolor="#CCFFCC" hTextAlign="Left" hImageAlign="Left" vTextAlign="Middle" vImageAlign="Middle" fontSize="12" isBold="true" isItalic="false" isUnderline="false" isStrikeThrough="false"/>
+ <style name="Node_Group" mode="Opaque" backcolor="#DFDFDF" hTextAlign="Left" hImageAlign="Left" vTextAlign="Middle" vImageAlign="Middle" fontSize="10" isBold="false" isItalic="false" isUnderline="false" isStrikeThrough="false"/>
+ <style name="table">
+ <box>
+ <pen lineWidth="1.0" lineColor="#000000"/>
+ </box>
+ </style>
+ <style name="table_TH" mode="Opaque" backcolor="#F0F8FF">
+ <box>
+ <topPen lineWidth="0.5" lineColor="#000000"/>
+ <bottomPen lineWidth="0.5" lineColor="#000000"/>
+ </box>
+ </style>
+ <style name="table_CH" mode="Opaque" backcolor="#BFE1FF">
+ <box>
+ <topPen lineWidth="0.5" lineColor="#000000"/>
+ <bottomPen lineWidth="0.5" lineColor="#000000"/>
+ </box>
+ </style>
+ <style name="table_TD" mode="Opaque" backcolor="#FFFFFF">
+ <box>
+ <topPen lineWidth="0.5" lineColor="#000000"/>
+ <bottomPen lineWidth="0.5" lineColor="#000000"/>
+ </box>
+ </style>
+ <style name="style1"/>
+ <style name="Interface_Header" hTextAlign="Center" hImageAlign="Center" vTextAlign="Middle" vImageAlign="Middle" isBold="true" isItalic="false" isUnderline="false" isStrikeThrough="false"/>
+ <parameter name="ONMS_REPORT_DIR" class="java.lang.String" isForPrompting="false">
+ <parameterDescription><![CDATA[The directory where all reports can be found]]></parameterDescription>
+ <defaultValueExpression><![CDATA["/opt/opennms/etc/report-templates"]]></defaultValueExpression>
+ </parameter>
+ <parameter name="COMPANY_LOGO" class="java.lang.String" isForPrompting="false">
+ <defaultValueExpression><![CDATA[$P{ONMS_REPORT_DIR} + "/assets/images/company-logo.png"]]></defaultValueExpression>
+ </parameter>
+ <parameter name="SUBREPORT_DIR" class="java.lang.String" isForPrompting="false">
+ <defaultValueExpression><![CDATA[$P{ONMS_REPORT_DIR} + "/subreports/"]]></defaultValueExpression>
+ </parameter>
+ <queryString>
+ <![CDATA[select
+n.nodeid, n.nodelabel, n.nodesyscontact, n.nodesysdescription, n.nodesyslocation, d.value
+from
+ node n,
+ (select * from statisticsreport order by id desc limit 1) s,
+ statisticsreportdata d,
+ resourcereference r
+where
+ s.id=d.reportid
+ and s.name='TopN_IOWait'
+ and d.resourceid=r.id
+ and r.resourceid='nodeSource['||n.foreignsource||'%3A'||n.foreignid||'].nodeSnmp[]'
+order by d.value desc]]>
+ </queryString>
+ <field name="nodeid" class="java.lang.Integer"/>
+ <field name="nodelabel" class="java.lang.String"/>
+ <field name="nodesyscontact" class="java.lang.String"/>
+ <field name="nodesysdescription" class="java.lang.String"/>
+ <field name="nodesyslocation" class="java.lang.String"/>
+ <field name="value" class="java.lang.Double"/>
+ <background>
+ <band splitType="Stretch"/>
+ </background>
+ <title>
+ <band height="4" splitType="Stretch"/>
+ </title>
+ <pageHeader>
+ <band height="80" splitType="Stretch">
+ <staticText>
+ <reportElement style="Title" x="0" y="0" width="355" height="30" uuid="e2bbd5aa-7bb3-4da8-9975-db0696053b3e">
+ <property name="local_mesure_unitwidth" value="pixel"/>
+ <property name="com.jaspersoft.studio.unit.width" value="px"/>
+ <property name="local_mesure_unitheight" value="pixel"/>
+ <property name="com.jaspersoft.studio.unit.height" value="px"/>
+ </reportElement>
+ <textElement verticalAlignment="Middle">
+ <font size="20" isBold="true"/>
+ </textElement>
+ <text><![CDATA[Top 20 nodes by I/O Wait]]></text>
+ </staticText>
+ <image>
+ <reportElement x="360" y="0" width="194" height="50" uuid="7a2e4fd9-7cb3-4ee0-a739-39549ded3164">
+ <property name="local_mesure_unitwidth" value="pixel"/>
+ <property name="com.jaspersoft.studio.unit.width" value="px"/>
+ <property name="local_mesure_unitheight" value="pixel"/>
+ <property name="com.jaspersoft.studio.unit.height" value="px"/>
+ </reportElement>
+ <imageExpression><![CDATA[$P{COMPANY_LOGO}]]></imageExpression>
+ </image>
+ <staticText>
+ <reportElement x="0" y="30" width="354" height="30" uuid="49028b4e-cd3b-4a4f-b33c-f70c20e4a543"/>
+ <text><![CDATA[Average for the last 24 hours.]]></text>
+ </staticText>
+ </band>
+ </pageHeader>
+ <columnHeader>
+ <band height="19" splitType="Stretch">
+ <line>
+ <reportElement x="0" y="0" width="555" height="1" uuid="c5cc50fc-34f3-4f66-a76a-c648728b32a8">
+ <property name="local_mesure_unitx" value="pixel"/>
+ <property name="com.jaspersoft.studio.unit.x" value="px"/>
+ <property name="local_mesure_unitwidth" value="pixel"/>
+ <property name="com.jaspersoft.studio.unit.width" value="px"/>
+ </reportElement>
+ <graphicElement>
+ <pen lineWidth="1.5"/>
+ </graphicElement>
+ </line>
+ <staticText>
+ <reportElement x="0" y="1" width="140" height="18" uuid="405bc934-e220-4d23-a4bd-64377740d5ca"/>
+ <textElement textAlignment="Center">
+ <font isBold="true"/>
+ </textElement>
+ <text><![CDATA[Node Label]]></text>
+ </staticText>
+ <staticText>
+ <reportElement x="140" y="1" width="280" height="18" uuid="42f8b403-c7e6-4aa3-9936-749fac94cb66"/>
+ <textElement textAlignment="Center">
+ <font isBold="true"/>
+ </textElement>
+ <text><![CDATA[Node details]]></text>
+ </staticText>
+ <staticText>
+ <reportElement x="440" y="1" width="114" height="18" uuid="b68e8040-9386-4658-b857-dc354a53b2dc"/>
+ <textElement textAlignment="Right">
+ <font isBold="true"/>
+ </textElement>
+ <text><![CDATA[ I/O Wait (raw)]]></text>
+ </staticText>
+ </band>
+ </columnHeader>
+ <detail>
+ <band height="22" splitType="Stretch">
+ <textField>
+ <reportElement x="0" y="0" width="140" height="16" uuid="27a3911b-2cb4-4f8c-8113-cd87a3b7ddca"/>
+ <textFieldExpression><![CDATA[$F{nodelabel}]]></textFieldExpression>
+ </textField>
+ <textField>
+ <reportElement x="140" y="0" width="280" height="16" forecolor="#9C9C9C" uuid="ee591f75-53af-49d4-96aa-ef1022246013"/>
+ <textFieldExpression><![CDATA[$F{nodesysdescription}]]></textFieldExpression>
+ </textField>
+ <textField pattern="#0">
+ <reportElement x="440" y="0" width="114" height="16" uuid="127ebdb6-ffa0-4571-bfb9-ce004b14b246"/>
+ <textElement textAlignment="Right"/>
+ <textFieldExpression><![CDATA[$F{value}]]></textFieldExpression>
+ </textField>
+ </band>
+ </detail>
+ <columnFooter>
+ <band height="259" splitType="Stretch">
+ <property name="com.jaspersoft.studio.unit.height" value="pixel"/>
+ <lineChart>
+ <chart evaluationTime="Report">
+ <reportElement x="0" y="0" width="554" height="258" uuid="1418a549-330a-4a9e-87fb-0d2448131ef4"/>
+ <chartTitle/>
+ <chartSubtitle/>
+ <chartLegend/>
+ </chart>
+ <categoryDataset>
+ <categorySeries>
+ <seriesExpression><![CDATA["I/O WAIT"]]></seriesExpression>
+ <categoryExpression><![CDATA[$F{nodelabel}]]></categoryExpression>
+ <valueExpression><![CDATA[$F{value}]]></valueExpression>
+ <labelExpression><![CDATA[$F{nodelabel}]]></labelExpression>
+ </categorySeries>
+ </categoryDataset>
+ <linePlot>
+ <plot labelRotation="45.0"/>
+ <categoryAxisFormat labelRotation="45.0">
+ <axisFormat/>
+ </categoryAxisFormat>
+ <valueAxisFormat>
+ <axisFormat/>
+ </valueAxisFormat>
+ </linePlot>
+ </lineChart>
+ </band>
+ </columnFooter>
+ <pageFooter>
+ <band height="35" splitType="Stretch">
+ <property name="local_mesure_unitheight" value="pixel"/>
+ <property name="com.jaspersoft.studio.unit.height" value="px"/>
+ <line>
+ <reportElement x="0" y="10" width="554" height="1" uuid="ec45ea9b-d9e4-4343-9cf9-512b9db8e881"/>
+ <graphicElement>
+ <pen lineWidth="1.5"/>
+ </graphicElement>
+ </line>
+ <textField>
+ <reportElement style="Paging-Footer" x="451" y="13" width="80" height="20" uuid="eb684bd0-d9e6-4a31-a516-b4edf17125d7"/>
+ <textElement textAlignment="Right"/>
+ <textFieldExpression><![CDATA["Page "+$V{PAGE_NUMBER}+" of"]]></textFieldExpression>
+ </textField>
+ <textField evaluationTime="Report">
+ <reportElement style="Paging-Footer" x="531" y="13" width="24" height="20" uuid="647f6ab1-e658-4c3c-b0d7-356b2366cba4"/>
+ <textFieldExpression><![CDATA[" " + $V{PAGE_NUMBER}]]></textFieldExpression>
+ </textField>
+ <textField pattern="yyyy/MM/dd HH:mm:ss">
+ <reportElement style="Creation-Date" x="0" y="13" width="355" height="20" uuid="25086ab7-3068-458e-8feb-e35e07b2b9e5">
+ <property name="local_mesure_unitwidth" value="pixel"/>
+ <property name="com.jaspersoft.studio.unit.width" value="px"/>
+ </reportElement>
+ <textFieldExpression><![CDATA[new java.util.Date()]]></textFieldExpression>
+ </textField>
+ </band>
+ </pageFooter>
+ <noData>
+ <band height="155">
+ <line>
+ <reportElement x="0" y="80" width="555" height="1" uuid="3904dc8d-463e-4824-b19c-5663db0bc38a">
+ <property name="local_mesure_unity" value="pixel"/>
+ <property name="com.jaspersoft.studio.unit.y" value="px"/>
+ </reportElement>
+ <graphicElement>
+ <pen lineWidth="1.5"/>
+ </graphicElement>
+ </line>
+ <textField>
+ <reportElement x="0" y="85" width="555" height="59" uuid="5fa4caf7-f9b2-4ab4-8570-ecd5b55057fa">
+ <property name="local_mesure_unity" value="pixel"/>
+ <property name="com.jaspersoft.studio.unit.y" value="px"/>
+ </reportElement>
+ <textFieldExpression><![CDATA["There is no data for this report yet."]]></textFieldExpression>
+ </textField>
+ </band>
+ </noData>
+</jasperReport>
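The detail band above reads three fields from the report's query. For reference, a minimal JRXML field declaration matching those expressions would look like the sketch below (field names are taken from the expressions above; the classes are assumptions):

<field name="nodelabel" class="java.lang.String"/>
<field name="nodesysdescription" class="java.lang.String"/>
<field name="value" class="java.lang.Double"/>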
diff --git a/report-templates/TotalBytesTransferredByInterface.jasper b/report-templates/TotalBytesTransferredByInterface.jasper
index ee677ff..ad3afff 100644
Binary files a/report-templates/TotalBytesTransferredByInterface.jasper and b/report-templates/TotalBytesTransferredByInterface.jasper differ
diff --git a/report-templates/sample-report.jasper b/report-templates/sample-report.jasper
index fabefe3..2f2f826 100644
Binary files a/report-templates/sample-report.jasper and b/report-templates/sample-report.jasper differ
diff --git a/report-templates/subreports/95thPercentileTrafficRate_subreport.jasper b/report-templates/subreports/95thPercentileTrafficRate_subreport.jasper
index b7504cc..55ae735 100644
Binary files a/report-templates/subreports/95thPercentileTrafficRate_subreport.jasper and b/report-templates/subreports/95thPercentileTrafficRate_subreport.jasper differ
diff --git a/report-templates/subreports/AvailabilitySummaryChart_subreport.jasper b/report-templates/subreports/AvailabilitySummaryChart_subreport.jasper
index c207f8a..cfa17af 100644
Binary files a/report-templates/subreports/AvailabilitySummaryChart_subreport.jasper and b/report-templates/subreports/AvailabilitySummaryChart_subreport.jasper differ
diff --git a/report-templates/subreports/DiskUsageForCTXServers_subreport1.jasper b/report-templates/subreports/DiskUsageForCTXServers_subreport1.jasper
index c78b23a..26d0c91 100644
Binary files a/report-templates/subreports/DiskUsageForCTXServers_subreport1.jasper and b/report-templates/subreports/DiskUsageForCTXServers_subreport1.jasper differ
diff --git a/report-templates/subreports/InterfaceAvailabilityReport_subreport1.jasper b/report-templates/subreports/InterfaceAvailabilityReport_subreport1.jasper
index 8fdfd63..0657413 100644
Binary files a/report-templates/subreports/InterfaceAvailabilityReport_subreport1.jasper and b/report-templates/subreports/InterfaceAvailabilityReport_subreport1.jasper differ
diff --git a/report-templates/subreports/NodeId_to_NodeLabel_subreport.jasper b/report-templates/subreports/NodeId_to_NodeLabel_subreport.jasper
index c7a2fe1..3f6f89a 100644
Binary files a/report-templates/subreports/NodeId_to_NodeLabel_subreport.jasper and b/report-templates/subreports/NodeId_to_NodeLabel_subreport.jasper differ
diff --git a/report-templates/subreports/PeakTraffic_subreport.jasper b/report-templates/subreports/PeakTraffic_subreport.jasper
index ee62ff1..a4b951a 100644
Binary files a/report-templates/subreports/PeakTraffic_subreport.jasper and b/report-templates/subreports/PeakTraffic_subreport.jasper differ
diff --git a/report-templates/subreports/ResponseTimeSummary_Availability_Offenders_subreport.jasper b/report-templates/subreports/ResponseTimeSummary_Availability_Offenders_subreport.jasper
index 76c351b..32c1336 100644
Binary files a/report-templates/subreports/ResponseTimeSummary_Availability_Offenders_subreport.jasper and b/report-templates/subreports/ResponseTimeSummary_Availability_Offenders_subreport.jasper differ
diff --git a/report-templates/subreports/ResponseTimeSummary_Availability_Offenders_subreport.jrxml b/report-templates/subreports/ResponseTimeSummary_Availability_Offenders_subreport.jrxml
index db8005f..e1a9165 100644
--- a/report-templates/subreports/ResponseTimeSummary_Availability_Offenders_subreport.jrxml
+++ b/report-templates/subreports/ResponseTimeSummary_Availability_Offenders_subreport.jrxml
@@ -98,7 +98,7 @@ FROM
outages, service, events, ifservices
WHERE
(iflostservice, COALESCE(ifregainedservice,'$P!{END_TIME_STRING}'::TIMESTAMP)) OVERLAPS ('$P!{START_TIME_STRING}'::TIMESTAMP, '$P!{START_TIME_STRING}'::TIMESTAMP + '$P!{TIME_RANGE}'::INTERVAL) AND
- outages.serviceid = service.serviceid AND
+ outages.ifserviceid = service.serviceid AND
service.servicename = 'ICMP' AND
outages.svclosteventid = events.eventid AND
events.eventuei = 'uei.opennms.org/nodes/nodeDown')
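The one-line fix above swaps outages.serviceid for outages.ifserviceid in the service join, matching the column the outages table actually carries in this release. A hedged sketch of the corrected pairing in isolation (aliases added for readability; the real query above also constrains the outage window and the event UEI):

SELECT o.outageid, s.servicename
FROM outages o, service s
WHERE o.ifserviceid = s.serviceid
  AND s.servicename = 'ICMP';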
diff --git a/report-templates/subreports/ResponseTimeSummary_Availability_subreport.jasper b/report-templates/subreports/ResponseTimeSummary_Availability_subreport.jasper
index 2079e9e..730caf9 100644
Binary files a/report-templates/subreports/ResponseTimeSummary_Availability_subreport.jasper and b/report-templates/subreports/ResponseTimeSummary_Availability_subreport.jasper differ
diff --git a/report-templates/subreports/ResponseTimeSummary_Response_Offenders_subreport.jasper b/report-templates/subreports/ResponseTimeSummary_Response_Offenders_subreport.jasper
index 3554356..58f66a9 100644
Binary files a/report-templates/subreports/ResponseTimeSummary_Response_Offenders_subreport.jasper and b/report-templates/subreports/ResponseTimeSummary_Response_Offenders_subreport.jasper differ
diff --git a/report-templates/subreports/ResponseTimeSummary_subreport.jasper b/report-templates/subreports/ResponseTimeSummary_subreport.jasper
index 9b17688..284cd26 100644
Binary files a/report-templates/subreports/ResponseTimeSummary_subreport.jasper and b/report-templates/subreports/ResponseTimeSummary_subreport.jasper differ
diff --git a/report-templates/subreports/ResponseTime_subreport1.jasper b/report-templates/subreports/ResponseTime_subreport1.jasper
index 8201404..9f678b4 100644
Binary files a/report-templates/subreports/ResponseTime_subreport1.jasper and b/report-templates/subreports/ResponseTime_subreport1.jasper differ
diff --git a/report-templates/subreports/Top25PercentDown_subreport.jasper b/report-templates/subreports/Top25PercentDown_subreport.jasper
index aaca5f9..875434d 100644
Binary files a/report-templates/subreports/Top25PercentDown_subreport.jasper and b/report-templates/subreports/Top25PercentDown_subreport.jasper differ
diff --git a/report-templates/subreports/TotalBytesTransferredByInterface_subreport1.jasper b/report-templates/subreports/TotalBytesTransferredByInterface_subreport1.jasper
index 1cc45dd..e1721d3 100644
Binary files a/report-templates/subreports/TotalBytesTransferredByInterface_subreport1.jasper and b/report-templates/subreports/TotalBytesTransferredByInterface_subreport1.jasper differ
diff --git a/shell.init.script b/shell.init.script
index edcd769..74adbde 100644
--- a/shell.init.script
+++ b/shell.init.script
@@ -124,4 +124,4 @@ enable-3x-aliases = {
echo "Karaf 3.x aliases enabled"
-}
\ No newline at end of file
+}
diff --git a/snmp-graph.properties.d/activemq-graph.properties b/snmp-graph.properties.d/activemq-graph.properties
new file mode 100644
index 0000000..672c165
--- /dev/null
+++ b/snmp-graph.properties.d/activemq-graph.properties
@@ -0,0 +1,141 @@
+reports=activemq.total.consumer.count, \
+activemq.total.connection.count, \
+activemq.message.count, \
+activemq.consumer.producer.count, \
+activemq.storage.limit, \
+activemq.storage.percentage, \
+activemq.temp.storage.limit, \
+activemq.temp.storage.percentage
+
+#
+# Consumer Count
+#
+report.activemq.total.consumer.count.name=ActiveMQ Consumer Count
+report.activemq.total.consumer.count.columns=TtlConsumerCnt
+report.activemq.total.consumer.count.type=interfaceSnmp
+report.activemq.total.consumer.count.command=--title="ActiveMQ Consumer Count" \
+ --units-exponent=0 \
+ --vertical-label="Consumer Count" \
+ DEF:val1={rrd1}:TtlConsumerCnt:AVERAGE \
+ AREA:val1#c4a000 \
+ LINE2:val1#000000:"Consumer Count " \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+#
+# Total Connection Count
+#
+report.activemq.total.connection.count.name=ActiveMQ Total Connection Count
+report.activemq.total.connection.count.columns=TtlConCnt
+report.activemq.total.connection.count.type=interfaceSnmp
+report.activemq.total.connection.count.command=--title="ActiveMQ Connection Count" \
+ --units-exponent=0 \
+ --vertical-label="Connection Count" \
+ DEF:val1={rrd1}:TtlConCnt:AVERAGE \
+ AREA:val1#4e9a06 \
+ LINE2:val1#000000:"Connection Count " \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+#
+# Message Size
+#
+report.activemq.message.count.name=ActiveMQ Message Size
+report.activemq.message.count.columns=MinMsgSize, AvgMsgSize, MaxMsgSize
+report.activemq.message.count.type=interfaceSnmp
+report.activemq.message.count.command=--title="ActiveMQ Message Size" \
+ --units-exponent=0 \
+ --vertical-label="Message Size" \
+ DEF:val1={rrd1}:MinMsgSize:AVERAGE \
+ DEF:val2={rrd2}:AvgMsgSize:AVERAGE \
+ DEF:val3={rrd3}:MaxMsgSize:AVERAGE \
+ COMMENT:"Message Size\\n" \
+ LINE2:val1#ad7fa8:"Min " \
+ GPRINT:val1:MIN:"%10.2lf\\n" \
+ LINE2:val2#729fcf:"Avg            " \
+ GPRINT:val2:AVERAGE:"%10.2lf\\n" \
+ LINE2:val3#c17d11:"Max            " \
+ GPRINT:val3:MAX:"%10.2lf\\n"
+
+#
+# Producer-Consumer Count
+#
+report.activemq.consumer.producer.count.name=ActiveMQ Producer-Consumer Count
+report.activemq.consumer.producer.count.columns=TtlProdCnt,TtlConsumerCnt
+report.activemq.consumer.producer.count.type=interfaceSnmp
+report.activemq.consumer.producer.count.command=--title="ActiveMQ Producer-Consumer Count" \
+ --units-exponent=0 \
+ --vertical-label="Producer-Consumer Count" \
+ DEF:val1={rrd1}:TtlProdCnt:AVERAGE \
+ DEF:val2={rrd2}:TtlConsumerCnt:AVERAGE \
+ COMMENT:"Producer Consumer Count\\n" \
+ LINE2:val1#ad7fa8:"Producers      " \
+ GPRINT:val1:AVERAGE:"%10.2lf\\n" \
+ LINE2:val2#729fcf:"Consumers      " \
+ GPRINT:val2:AVERAGE:"%10.2lf\\n"
+
+#
+# Storage Limit
+#
+report.activemq.storage.limit.name=ActiveMQ Storage Limit
+report.activemq.storage.limit.columns=StoreLimit
+report.activemq.storage.limit.type=interfaceSnmp
+report.activemq.storage.limit.command=--title="ActiveMQ Storage Limit" \
+ --units-exponent=0 \
+ --vertical-label="Storage Limit" \
+ DEF:val1={rrd1}:StoreLimit:AVERAGE \
+ AREA:val1#ce5c00 \
+ LINE2:val1#000000:"Storage Limit" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+#
+# Percentage Usage
+#
+report.activemq.storage.percentage.name=ActiveMQ Storage Percentage Usage
+report.activemq.storage.percentage.columns=StorePctUsage
+report.activemq.storage.percentage.type=interfaceSnmp
+report.activemq.storage.percentage.command=--title="ActiveMQ Storage Percentage Usage" \
+ --units-exponent=0 \
+ --vertical-label="Storage Percentage Usage" \
+ DEF:val1={rrd1}:StorePctUsage:AVERAGE \
+ AREA:val1#8f5902 \
+ LINE2:val1#000000:"Storage Percentage Usage" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+#
+# Temp Storage Limit
+#
+report.activemq.temp.storage.limit.name=ActiveMQ Temp Storage Limit
+report.activemq.temp.storage.limit.columns=TempLimit
+report.activemq.temp.storage.limit.type=interfaceSnmp
+report.activemq.temp.storage.limit.command=--title="ActiveMQ Temp Storage Limit" \
+ --units-exponent=0 \
+ --vertical-label="Temp Storage Limit" \
+ DEF:val1={rrd1}:TempLimit:AVERAGE \
+ AREA:val1#ce5c00 \
+ LINE2:val1#000000:"Temp Storage Limit" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+#
+# Temp Percentage Storage
+#
+report.activemq.temp.storage.percentage.name=ActiveMQ Temp Storage Percentage Used
+report.activemq.temp.storage.percentage.columns=TempPctUsage
+report.activemq.temp.storage.percentage.type=interfaceSnmp
+report.activemq.temp.storage.percentage.command=--title="ActiveMQ Temp Storage Percentage Used" \
+ --units-exponent=0 \
+ --vertical-label="Temp Storage Percentage Used" \
+ DEF:val1={rrd1}:TempPctUsage:AVERAGE \
+ AREA:val1#8f5902 \
+ LINE2:val1#000000:"Temp Percentage Storage Used" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
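The column names these graphs reference (TtlConsumerCnt, TtlConCnt, StoreLimit, and so on) must be produced by a matching JMX collection definition before anything is graphed. A rough sketch of the usual jmx-datacollection-config.xml mbean block; the object name and source attribute names are illustrative assumptions, not taken from this diff:

<mbean name="ActiveMQ Broker" objectname="org.apache.activemq:type=Broker,brokerName=*">
    <attrib name="TotalConsumerCount" alias="TtlConsumerCnt" type="gauge"/>
    <attrib name="TotalConnectionsCount" alias="TtlConCnt" type="gauge"/>
    <attrib name="StoreLimit" alias="StoreLimit" type="gauge"/>
</mbean>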
diff --git a/snmp-graph.properties.d/bluecoat-sgproxy-graph.properties b/snmp-graph.properties.d/bluecoat-sgproxy-graph.properties
index d668ba9..f653de8 100644
--- a/snmp-graph.properties.d/bluecoat-sgproxy-graph.properties
+++ b/snmp-graph.properties.d/bluecoat-sgproxy-graph.properties
@@ -69,7 +69,7 @@ report.sgProxy.workers.command=--title="Client-Server Workers" \
STACK:ServerConnections#0000ff:"ServerConnections" \
GPRINT:ServerConnections:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:ServerConnections:MIN:"Min \\: %8.2lf %s" \
- GPRINT:ServerConnections:MAX:"Max \\: %8.2lf %s\\n"
+ GPRINT:ServerConnections:MAX:"Max \\: %8.2lf %s\\n"
report.sgProxy.client.connections.name=ProxySG Client Workers
report.sgProxy.client.connections.columns=ClientConnections,ClientConnectionsAc,ClientConnectionsId
@@ -92,7 +92,7 @@ report.sgProxy.client.connections.command=--title="Client Workers" \
STACK:ClientConnectionsId#ff0000:"ClientConnectionsId" \
GPRINT:ClientConnectionsId:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:ClientConnectionsId:MIN:"Min \\: %8.2lf %s" \
- GPRINT:ClientConnectionsId:MAX:"Max \\: %8.2lf %s\\n"
+ GPRINT:ClientConnectionsId:MAX:"Max \\: %8.2lf %s\\n"
report.sgProxy.server.connections.name=ProxySG Server Workers
report.sgProxy.server.connections.columns=ServerConnections,ServerConnectionsAc,ServerConnectionsId
@@ -133,7 +133,7 @@ report.sgProxy.cpu.command=--title="CPU Usage" \
STACK:CpuIdlePerCent#00ff00:"CpuIdlePerCent" \
GPRINT:CpuIdlePerCent:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:CpuIdlePerCent:MIN:"Min \\: %8.2lf %s" \
- GPRINT:CpuIdlePerCent:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:CpuIdlePerCent:MAX:"Max \\: %8.2lf %s\\n"
report.sgProxy.cache.name=ProxySG Cache
report.sgProxy.cache.columns=ByteRateHit,ByteRatePartialHit,ByteRateMiss
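The hunks above only trim trailing whitespace and drop a stray line-continuation backslash after the final GPRINT. That backslash is worth removing because the properties format joins any line ending in a backslash with the line that follows, so a trailing one at the end of a command is fragile under later edits. A minimal illustration with hypothetical keys:

# "graph.command" below resolves to: --title="Example" LINE2:val1#000000:"Example"
graph.command=--title="Example" \
 LINE2:val1#000000:"Example"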
diff --git a/snmp-graph.properties.d/cassandra-graph.properties b/snmp-graph.properties.d/cassandra-graph.properties
new file mode 100644
index 0000000..2263b27
--- /dev/null
+++ b/snmp-graph.properties.d/cassandra-graph.properties
@@ -0,0 +1,361 @@
+reports=cassandra.metrics.Client, \
+cassandra.metrics.Compaction.Bytes, \
+cassandra.metrics.Compaction.Tasks, \
+cassandra.metrics.Storage.Load, \
+cassandra.metrics.Storage.Exceptions, \
+cassandra.metrics.DroppedMessages, \
+cassandra.metrics.ThreadPools.internal.MemtableFlushWriter, \
+cassandra.metrics.ThreadPools.internal.MemtablePostFlush, \
+cassandra.metrics.ThreadPools.internal.AntiEntropyStage, \
+cassandra.metrics.ThreadPools.internal.GossipStage, \
+cassandra.metrics.ThreadPools.internal.MigrationStage, \
+cassandra.metrics.ThreadPools.internal.MiscStage, \
+cassandra.metrics.ThreadPools.MutationStage, \
+cassandra.metrics.ThreadPools.request.ReadStage, \
+cassandra.metrics.ThreadPools.RequestResponseStage, \
+cassandra.metrics.ThreadPools.ReadRepairStage
+
+report.cassandra.metrics.Client.name=Cassandra Client Connections
+report.cassandra.metrics.Client.columns=clntConNativeClnts, clntConThriftClnts
+report.cassandra.metrics.Client.type=interfaceSnmp
+report.cassandra.metrics.Client.command=--title="Cassandra Client Connections" \
+ --vertical-label="Clients" \
+ DEF:val1={rrd1}:clntConNativeClnts:AVERAGE \
+ DEF:val2={rrd2}:clntConThriftClnts:AVERAGE \
+ AREA:val1#cc0000:"Connected Native Clients" \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:val2#f57900:"Connected Thrift Clients" \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.Compaction.Bytes.name=Cassandra Compaction
+report.cassandra.metrics.Compaction.Bytes.columns=cpctBytesCompacted
+report.cassandra.metrics.Compaction.Bytes.type=interfaceSnmp
+report.cassandra.metrics.Compaction.Bytes.command=--title="Cassandra Compaction" \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:cpctBytesCompacted:AVERAGE \
+ AREA:val1#babdb6 \
+ LINE1.5:val1#888a85:"Bytes Compacted" \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.Compaction.Tasks.name=Cassandra Compaction Tasks
+report.cassandra.metrics.Compaction.Tasks.columns=cpctPendingTasks, cpctCompletedTasks
+report.cassandra.metrics.Compaction.Tasks.type=interfaceSnmp
+report.cassandra.metrics.Compaction.Tasks.command=--title="Cassandra Compaction Tasks" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:cpctPendingTasks:AVERAGE \
+ DEF:val2={rrd2}:cpctCompletedTasks:AVERAGE \
+ AREA:val1#cc0000:"Compaction Tasks Pending " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:val2#f57900:"Compaction Tasks Completed" \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.Storage.Load.name=Cassandra Storage Load
+report.cassandra.metrics.Storage.Load.columns=strgLoad
+report.cassandra.metrics.Storage.Load.type=interfaceSnmp
+report.cassandra.metrics.Storage.Load.command=--title="Cassandra Storage Load" \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:strgLoad:AVERAGE \
+ AREA:val1#babdb6 \
+ LINE1.5:val1#888a85:"Storage Load " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.Storage.Exceptions.name=Cassandra Exceptions
+report.cassandra.metrics.Storage.Exceptions.columns=strgExceptions
+report.cassandra.metrics.Storage.Exceptions.type=interfaceSnmp
+report.cassandra.metrics.Storage.Exceptions.command=--title="Cassandra Exceptions" \
+ --vertical-label="Exceptions" \
+ DEF:val1={rrd1}:strgExceptions:AVERAGE \
+ LINE1.5:val1#3465a4:"Unhandled Exceptions " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.DroppedMessages.name=Cassandra Dropped Messages
+report.cassandra.metrics.DroppedMessages.columns=drpdMsgRead, drpdMsgReadRepair, drpdMsgReqResp, drpdMsgRangeSlice, drpdMsgMutation
+report.cassandra.metrics.DroppedMessages.type=interfaceSnmp
+report.cassandra.metrics.DroppedMessages.command=--title="Cassandra Dropped Messages" \
+ --vertical-label="Dropped Messages" \
+ DEF:val1={rrd1}:drpdMsgRead:AVERAGE \
+ DEF:val2={rrd2}:drpdMsgReadRepair:AVERAGE \
+ DEF:val3={rrd3}:drpdMsgReqResp:AVERAGE \
+ DEF:val4={rrd4}:drpdMsgRangeSlice:AVERAGE \
+ DEF:val5={rrd5}:drpdMsgMutation:AVERAGE \
+ LINE1.5:val1#f57900:"Read " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#cc0000:"Read Repair " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#4e9a06:"Request Response " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val4#3465a4:"Range Slice " \
+ GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val5#5c3566:"Message Mutation " \
+ GPRINT:val5:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val5:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val5:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.ThreadPools.internal.MemtableFlushWriter.name=Cassandra Thread Pool Memtable Flush Writer
+report.cassandra.metrics.ThreadPools.internal.MemtableFlushWriter.columns=tpIntMemTblFlsWrAt, tpIntMemTblFlsWrCbt
+report.cassandra.metrics.ThreadPools.internal.MemtableFlushWriter.type=interfaceSnmp
+report.cassandra.metrics.ThreadPools.internal.MemtableFlushWriter.command=--title="Cassandra Thread Pool Memtable Flush Writer" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:tpIntMemTblFlsWrAt:AVERAGE \
+ DEF:val2={rrd2}:tpIntMemTblFlsWrCbt:AVERAGE \
+ LINE1.5:val1#cc0000:"Active Tasks " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#f57900:"Currently Blocked Tasks " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.ThreadPools.internal.MemtablePostFlush.name=Cassandra Thread Pool Memtable Post Flush Writer
+report.cassandra.metrics.ThreadPools.internal.MemtablePostFlush.columns=tpIntMemTblPoFlsAt, tpIntMemTblPoFlsCbt, tpIntMemTblPoFlsPt
+report.cassandra.metrics.ThreadPools.internal.MemtablePostFlush.type=interfaceSnmp
+report.cassandra.metrics.ThreadPools.internal.MemtablePostFlush.command=--title="Cassandra Thread Pool Memtable Post Flush Writer" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:tpIntMemTblPoFlsAt:AVERAGE \
+ DEF:val2={rrd2}:tpIntMemTblPoFlsCbt:AVERAGE \
+ DEF:val3={rrd3}:tpIntMemTblPoFlsPt:AVERAGE \
+ LINE1.5:val1#cc0000:"Active Tasks " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#f57900:"Currently Blocked Tasks " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#3465a4:"Pending Tasks " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.ThreadPools.internal.AntiEntropyStage.name=Thread Pool Internal Anti-Entropy Stage
+report.cassandra.metrics.ThreadPools.internal.AntiEntropyStage.columns=tpIntAntiEntStgeAt, tpIntAntiEntStgeCbt, tpIntAntiEntStgePt, tpIntAntiEntStgeCt
+report.cassandra.metrics.ThreadPools.internal.AntiEntropyStage.type=interfaceSnmp
+report.cassandra.metrics.ThreadPools.internal.AntiEntropyStage.command=--title="Thread Pool Internal Anti-Entropy Stage" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:tpIntAntiEntStgeAt:AVERAGE \
+ DEF:val2={rrd2}:tpIntAntiEntStgeCbt:AVERAGE \
+ DEF:val3={rrd3}:tpIntAntiEntStgePt:AVERAGE \
+ DEF:val4={rrd4}:tpIntAntiEntStgeCt:AVERAGE \
+ LINE1.5:val1#cc0000:"Active Tasks " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#f57900:"Currently Blocked Tasks " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#3465a4:"Pending Tasks " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val4#4e9a06:"Completed Tasks " \
+ GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.ThreadPools.internal.GossipStage.name=Thread Pool Internal Gossip Stage
+report.cassandra.metrics.ThreadPools.internal.GossipStage.columns=tpIntGosStgeAt, tpIntGosStgeCbt, tpIntGosStgePt, tpIntGosStgeCt
+report.cassandra.metrics.ThreadPools.internal.GossipStage.type=interfaceSnmp
+report.cassandra.metrics.ThreadPools.internal.GossipStage.command=--title="Thread Pool Internal Gossip Stage" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:tpIntGosStgeAt:AVERAGE \
+ DEF:val2={rrd2}:tpIntGosStgeCbt:AVERAGE \
+ DEF:val3={rrd3}:tpIntGosStgePt:AVERAGE \
+ DEF:val4={rrd4}:tpIntGosStgeCt:AVERAGE \
+ LINE1.5:val1#cc0000:"Active Tasks " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#f57900:"Currently Blocked Tasks " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#3465a4:"Pending Tasks " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val4#4e9a06:"Completed Tasks " \
+ GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.ThreadPools.internal.MigrationStage.name=Thread Pool Internal Migration Stage
+report.cassandra.metrics.ThreadPools.internal.MigrationStage.columns=tpIntMigStgeAt, tpIntMigStgeCbt, tpIntMigStgePt, tpIntMigStgeCt
+report.cassandra.metrics.ThreadPools.internal.MigrationStage.type=interfaceSnmp
+report.cassandra.metrics.ThreadPools.internal.MigrationStage.command=--title="Thread Pool Internal Migration Stage" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:tpIntMigStgeAt:AVERAGE \
+ DEF:val2={rrd2}:tpIntMigStgeCbt:AVERAGE \
+ DEF:val3={rrd3}:tpIntMigStgePt:AVERAGE \
+ DEF:val4={rrd4}:tpIntMigStgeCt:AVERAGE \
+ LINE1.5:val1#cc0000:"Active Tasks " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#f57900:"Currently Blocked Tasks " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#3465a4:"Pending Tasks " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val4#4e9a06:"Completed Tasks " \
+ GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.ThreadPools.internal.MiscStage.name=Thread Pool Internal Misc Stage
+report.cassandra.metrics.ThreadPools.internal.MiscStage.columns=tpIntMiscStgeAt, tpIntMiscStgeCbt, tpIntMiscStgePt, tpIntMiscStgeCt
+report.cassandra.metrics.ThreadPools.internal.MiscStage.type=interfaceSnmp
+report.cassandra.metrics.ThreadPools.internal.MiscStage.command=--title="Thread Pool Internal Misc Stage" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:tpIntMiscStgeAt:AVERAGE \
+ DEF:val2={rrd2}:tpIntMiscStgeCbt:AVERAGE \
+ DEF:val3={rrd3}:tpIntMiscStgePt:AVERAGE \
+ DEF:val4={rrd4}:tpIntMiscStgeCt:AVERAGE \
+ LINE1.5:val1#cc0000:"Active Tasks " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#f57900:"Currently Blocked Tasks " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#3465a4:"Pending Tasks " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val4#4e9a06:"Completed Tasks " \
+ GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.ThreadPools.MutationStage.name=Thread Pool Mutation Stage
+report.cassandra.metrics.ThreadPools.MutationStage.columns=tpMutStgeAt, tpMutStgeCbt, tpMutStgePt, tpMutStgeCt
+report.cassandra.metrics.ThreadPools.MutationStage.type=interfaceSnmp
+report.cassandra.metrics.ThreadPools.MutationStage.command=--title="Thread Pool Mutation Stage" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:tpMutStgeAt:AVERAGE \
+ DEF:val2={rrd2}:tpMutStgeCbt:AVERAGE \
+ DEF:val3={rrd3}:tpMutStgePt:AVERAGE \
+ DEF:val4={rrd4}:tpMutStgeCt:AVERAGE \
+ LINE1.5:val1#cc0000:"Active Tasks " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#f57900:"Currently Blocked Tasks " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#3465a4:"Pending Tasks " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val4#4e9a06:"Completed Tasks " \
+ GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.ThreadPools.request.ReadStage.name=Thread Pool Read Stage
+report.cassandra.metrics.ThreadPools.request.ReadStage.columns=tpReadStageAt, tpReadStageCbt, tpReadStagePt, tpReadStageCt
+report.cassandra.metrics.ThreadPools.request.ReadStage.type=interfaceSnmp
+report.cassandra.metrics.ThreadPools.request.ReadStage.command=--title="Thread Pool Read Stage" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:tpReadStageAt:AVERAGE \
+ DEF:val2={rrd2}:tpReadStageCbt:AVERAGE \
+ DEF:val3={rrd3}:tpReadStagePt:AVERAGE \
+ DEF:val4={rrd4}:tpReadStageCt:AVERAGE \
+ LINE1.5:val1#cc0000:"Active Tasks " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#f57900:"Currently Blocked Tasks " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#3465a4:"Pending Tasks " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val4#4e9a06:"Completed Tasks " \
+ GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.ThreadPools.RequestResponseStage.name=Thread Pool Request Response Stage
+report.cassandra.metrics.ThreadPools.RequestResponseStage.columns=tpReqRespStgeAt, tpReqRespStgeCbt, tpReqRespStgePt, tpReqRespStgeCt
+report.cassandra.metrics.ThreadPools.RequestResponseStage.type=interfaceSnmp
+report.cassandra.metrics.ThreadPools.RequestResponseStage.command=--title="Thread Pool Request Response Stage" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:tpReqRespStgeAt:AVERAGE \
+ DEF:val2={rrd2}:tpReqRespStgeCbt:AVERAGE \
+ DEF:val3={rrd3}:tpReqRespStgePt:AVERAGE \
+ DEF:val4={rrd4}:tpReqRespStgeCt:AVERAGE \
+ LINE1.5:val1#cc0000:"Active Tasks " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#f57900:"Currently Blocked Tasks " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#3465a4:"Pending Tasks " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val4#4e9a06:"Completed Tasks " \
+ GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.ThreadPools.ReadRepairStage.name=Thread Pool Read Repair Stage
+report.cassandra.metrics.ThreadPools.ReadRepairStage.columns=tpReadRepairStgeAt, tpReadRepairStgeCbt, tpReadRepairStgePt, tpReadRepairStgeCt
+report.cassandra.metrics.ThreadPools.ReadRepairStage.type=interfaceSnmp
+report.cassandra.metrics.ThreadPools.ReadRepairStage.command=--title="Thread Pool Read Repair Stage" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:tpReadRepairStgeAt:AVERAGE \
+ DEF:val2={rrd2}:tpReadRepairStgeCbt:AVERAGE \
+ DEF:val3={rrd3}:tpReadRepairStgePt:AVERAGE \
+ DEF:val4={rrd4}:tpReadRepairStgeCt:AVERAGE \
+ LINE1.5:val1#cc0000:"Active Tasks " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#f57900:"Currently Blocked Tasks " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#3465a4:"Pending Tasks " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val4#4e9a06:"Completed Tasks " \
+ GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
+
+
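At render time OpenNMS substitutes each {rrdN} placeholder with the path of the file backing the corresponding entry in report.<id>.columns, then hands the assembled arguments to the graphing backend. Roughly, the client-connections graph above turns into an invocation like the sketch below; the path and node id are assumptions (default share/rrd layout, one RRD file per data source, rrdtool backend), and the GPRINT lines are omitted for brevity:

rrdtool graph - --title="Cassandra Client Connections" \
    --vertical-label="Clients" \
    DEF:val1=/opt/opennms/share/rrd/snmp/1/clntConNativeClnts.rrd:clntConNativeClnts:AVERAGE \
    DEF:val2=/opt/opennms/share/rrd/snmp/1/clntConThriftClnts.rrd:clntConThriftClnts:AVERAGE \
    AREA:val1#cc0000:"Connected Native Clients" \
    STACK:val2#f57900:"Connected Thrift Clients"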
diff --git a/snmp-graph.properties.d/cassandra-newts-graph.properties b/snmp-graph.properties.d/cassandra-newts-graph.properties
new file mode 100644
index 0000000..427ce1a
--- /dev/null
+++ b/snmp-graph.properties.d/cassandra-newts-graph.properties
@@ -0,0 +1,196 @@
+reports=cassandra.metrics.keyspace.newts.AllMemtables.DataSize, \
+cassandra.metrics.keyspace.newts.Memtables.Switch.Counter, \
+cassandra.metrics.keyspace.newts.Memtables.Columns.Counter, \
+cassandra.metrics.keyspace.newts.Memtable.DataSize, \
+cassandra.metrics.keyspace.newts.rwLatency, \
+cassandra.metrics.keyspace.newts.RangeLatency.99th, \
+cassandra.metrics.keyspace.newts.Latency, \
+cassandra.metrics.keyspace.newts.Bloom.Disk, \
+cassandra.metrics.keyspace.newts.Bloom.Memory, \
+cassandra.metrics.keyspace.newts.MemoryUsed, \
+cassandra.metrics.keyspace.newts.pending, \
+cassandra.metrics.keyspace.newts.DiskSpace
+
+report.cassandra.metrics.keyspace.newts.AllMemtables.DataSize.name=All Memtables Data Size
+report.cassandra.metrics.keyspace.newts.AllMemtables.DataSize.columns=alMemTblLiDaSi, alMemTblOffHeapDaSi, alMemTblOnHeapDaSi
+report.cassandra.metrics.keyspace.newts.AllMemtables.DataSize.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.AllMemtables.DataSize.command=--title="All Memtables Data Size" \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:alMemTblLiDaSi:AVERAGE \
+ DEF:val2={rrd2}:alMemTblOffHeapDaSi:AVERAGE \
+ DEF:val3={rrd3}:alMemTblOnHeapDaSi:AVERAGE \
+ LINE1.5:val1#3465a4:"Live Data Size " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#75507b:"Off-Heap Data Size " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#c17d11:"On-Heap Data Size " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.Memtables.Switch.Counter.name=All Memtables Switch Counter
+report.cassandra.metrics.keyspace.newts.Memtables.Switch.Counter.columns=memTblSwitchCount
+report.cassandra.metrics.keyspace.newts.Memtables.Switch.Counter.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.Memtables.Switch.Counter.command=--title="All Memtables Switch Counter" \
+ --vertical-label="Number of Times" \
+ DEF:val1={rrd1}:memTblSwitchCount:AVERAGE \
+ LINE1.5:val1#3465a4:"Switch Counter " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.Memtables.Columns.Counter.name=All Memtables Columns Counter
+report.cassandra.metrics.keyspace.newts.Memtables.Columns.Counter.columns=memTblColumnsCnt
+report.cassandra.metrics.keyspace.newts.Memtables.Columns.Counter.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.Memtables.Columns.Counter.command=--title="All Memtables Columns Counter" \
+ --vertical-label="Columns" \
+ DEF:val1={rrd1}:memTblColumnsCnt:AVERAGE \
+ LINE1.5:val1#3465a4:"Columns " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.Memtable.DataSize.name=Newts Memtable Data Size
+report.cassandra.metrics.keyspace.newts.Memtable.DataSize.columns=memTblLiveDaSi, memTblOffHeapDaSi, memTblOnHeapDaSi
+report.cassandra.metrics.keyspace.newts.Memtable.DataSize.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.Memtable.DataSize.command=--title="Newts Memtable Data Size" \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:memTblLiveDaSi:AVERAGE \
+ DEF:val2={rrd2}:memTblOffHeapDaSi:AVERAGE \
+ DEF:val3={rrd3}:memTblOnHeapDaSi:AVERAGE \
+ LINE1.5:val1#3465a4:"Live Data Size " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#75507b:"Off-Heap Data Size " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val3#c17d11:"On-Heap Data Size              " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.rwLatency.name=Newts Read and Write Latency
+report.cassandra.metrics.keyspace.newts.rwLatency.columns=readTotLtncy, writeTotLtncy
+report.cassandra.metrics.keyspace.newts.rwLatency.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.rwLatency.command=--title="Newts Read and Write Latency" \
+ --vertical-label="micro seconds" \
+ DEF:val1={rrd1}:readTotLtncy:AVERAGE \
+ DEF:val2={rrd2}:writeTotLtncy:AVERAGE \
+ LINE1.5:val1#73d216:"Read Total Latency " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#3465a4:"Write Total Latency " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.RangeLatency.99th.name=Newts Range Latency 99th Percentile
+report.cassandra.metrics.keyspace.newts.RangeLatency.99th.columns=rangeLtncy99
+report.cassandra.metrics.keyspace.newts.RangeLatency.99th.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.RangeLatency.99th.command=--title="Newts Range Latency 99th Percentile" \
+ --vertical-label="micro seconds" \
+ DEF:val1={rrd1}:rangeLtncy99:AVERAGE \
+ LINE1.5:val1#3465a4:"Range Latency 99 Percentile " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.Latency.name=Newts Latency
+report.cassandra.metrics.keyspace.newts.Latency.columns=casCommitTotLtncy, casPrepareTotLtncy, casProposeTotLtncy
+report.cassandra.metrics.keyspace.newts.Latency.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.Latency.command=--title="Newts Latency" \
+ --vertical-label="micro seconds" \
+ DEF:val1={rrd1}:casCommitTotLtncy:AVERAGE \
+ DEF:val2={rrd2}:casPrepareTotLtncy:AVERAGE \
+ DEF:val3={rrd3}:casProposeTotLtncy:AVERAGE \
+ LINE1.5:val1#f57900:"Commit Total Latency " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:val2#3465a4:"Prepare Total Latency " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:val3#75507b:"Propose Total Latency " \
+ GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.Bloom.Disk.name=Bloom Filter Disk Usage
+report.cassandra.metrics.keyspace.newts.Bloom.Disk.columns=blmFltrDskSpcUsed
+report.cassandra.metrics.keyspace.newts.Bloom.Disk.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.Bloom.Disk.command=--title="Bloom Filter Disk Usage" \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:blmFltrDskSpcUsed:AVERAGE \
+ AREA:val1#babdb6 \
+ LINE1.5:val1#888a85:"Disk Space Used " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.Bloom.Memory.name=Bloom Filter Memory Usage
+report.cassandra.metrics.keyspace.newts.Bloom.Memory.columns=blmFltrOffHeapMemUs
+report.cassandra.metrics.keyspace.newts.Bloom.Memory.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.Bloom.Memory.command=--title="Bloom Filter Memory Usage" \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:blmFltrOffHeapMemUs:AVERAGE \
+ AREA:val1#babdb6 \
+ LINE1.5:val1#888a85:"Off-Heap Memory Used " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.MemoryUsed.name=Newts Memory Used
+report.cassandra.metrics.keyspace.newts.MemoryUsed.columns=cmpMetaOffHeapMemUs, idxSumOffHeapMemUs
+report.cassandra.metrics.keyspace.newts.MemoryUsed.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.MemoryUsed.command=--title="Newts Memory Used" \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:cmpMetaOffHeapMemUs:AVERAGE \
+ DEF:val2={rrd2}:idxSumOffHeapMemUs:AVERAGE \
+ LINE1.5:val1#f57900:"Compression Metadata Off-Heap Memory Used " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#3465a4:"Index Summary Off-Heap Memory Used " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.pending.name=Newts Pending
+report.cassandra.metrics.keyspace.newts.pending.columns=pendingCompactions, pendingFlushes
+report.cassandra.metrics.keyspace.newts.pending.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.pending.command=--title="Newts Pending" \
+ --vertical-label="Tasks" \
+ DEF:val1={rrd1}:pendingCompactions:AVERAGE \
+ DEF:val2={rrd2}:pendingFlushes:AVERAGE \
+ LINE1.5:val1#f57900:"Pending Compactions " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#3465a4:"Pending Flushes " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
+
+report.cassandra.metrics.keyspace.newts.DiskSpace.name=Newts Disk Space
+report.cassandra.metrics.keyspace.newts.DiskSpace.columns=totalDiskSpaceUsed, liveDiskSpaceUsed
+report.cassandra.metrics.keyspace.newts.DiskSpace.type=interfaceSnmp
+report.cassandra.metrics.keyspace.newts.DiskSpace.command=--title="Newts Disk Space" \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:totalDiskSpaceUsed:AVERAGE \
+ DEF:val2={rrd2}:liveDiskSpaceUsed:AVERAGE \
+ LINE1.5:val1#f57900:"Total Disk Space Used " \
+ GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
+ LINE1.5:val2#3465a4:"Live Disk Space Used " \
+ GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
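Both new graph files follow the same pattern: every id listed in reports= needs name, columns, type and command keys. A minimal, entirely hypothetical definition showing the four keys together:

reports=example.graph

report.example.graph.name=Example Graph
report.example.graph.columns=exampleColumn
report.example.graph.type=interfaceSnmp
report.example.graph.command=--title="Example Graph" \
 --vertical-label="Units" \
 DEF:val1={rrd1}:exampleColumn:AVERAGE \
 LINE2:val1#3465a4:"Example " \
 GPRINT:val1:AVERAGE:"Avg \\: %8.2lf %s\\n"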
diff --git a/snmp-graph.properties.d/cassandra21x-graph.properties b/snmp-graph.properties.d/cassandra21x-graph.properties
deleted file mode 100644
index 2263b27..0000000
--- a/snmp-graph.properties.d/cassandra21x-graph.properties
+++ /dev/null
@@ -1,361 +0,0 @@
-reports=cassandra.metrics.Client, \
-cassandra.metrics.Compaction.Bytes, \
-cassandra.metrics.Compaction.Tasks, \
-cassandra.metrics.Storage.Load, \
-cassandra.metrics.Storage.Exceptions, \
-cassandra.metrics.DroppedMessages, \
-cassandra.metrics.ThreadPools.internal.MemtableFlushWriter, \
-cassandra.metrics.ThreadPools.internal.MemtablePostFlush, \
-cassandra.metrics.ThreadPools.internal.AntiEntropyStage, \
-cassandra.metrics.ThreadPools.internal.GossipStage, \
-cassandra.metrics.ThreadPools.internal.MigrationStage, \
-cassandra.metrics.ThreadPools.internal.MiscStage, \
-cassandra.metrics.ThreadPools.MutationStage, \
-cassandra.metrics.ThreadPools.request.ReadStage, \
-cassandra.metrics.ThreadPools.RequestResponseStage, \
-cassandra.metrics.ThreadPools.ReadRepairStage
-
-report.cassandra.metrics.Client.name=Cassandra Client Connections
-report.cassandra.metrics.Client.columns=clntConNativeClnts, clntConThriftClnts
-report.cassandra.metrics.Client.type=interfaceSnmp
-report.cassandra.metrics.Client.command=--title="Cassandra Client Connections" \
- --vertical-label="Clients" \
- DEF:val1={rrd1}:clntConNativeClnts:AVERAGE \
- DEF:val2={rrd2}:clntConThriftClnts:AVERAGE \
- AREA:val1#cc0000:"Connected Native Clients" \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- STACK:val2#f57900:"Connected Thrift Clients" \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.Compaction.Bytes.name=Cassandra Compaction
-report.cassandra.metrics.Compaction.Bytes.columns=cpctBytesCompacted
-report.cassandra.metrics.Compaction.Bytes.type=interfaceSnmp
-report.cassandra.metrics.Compaction.Bytes.command=--title="Cassandra Compaction" \
- --vertical-label="Bytes" \
- DEF:val1={rrd1}:cpctBytesCompacted:AVERAGE \
- AREA:val1#babdb6 \
- LINE1.5:val1#888a85:"Bytes Compacted" \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.Compaction.Tasks.name=Cassandra Compaction Tasks
-report.cassandra.metrics.Compaction.Tasks.columns=cpctPendingTasks, cpctCompletedTasks
-report.cassandra.metrics.Compaction.Tasks.type=interfaceSnmp
-report.cassandra.metrics.Compaction.Tasks.command=--title="Cassandra Compaction Tasks" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:cpctPendingTasks:AVERAGE \
- DEF:val2={rrd2}:cpctCompletedTasks:AVERAGE \
- AREA:val1#cc0000:"Compaction Tasks Pending " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- STACK:val2#f57900:"Compaction Tasks Completed" \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.Storage.Load.name=Cassandra Storage Load
-report.cassandra.metrics.Storage.Load.columns=strgLoad
-report.cassandra.metrics.Storage.Load.type=interfaceSnmp
-report.cassandra.metrics.Storage.Load.command=--title="Cassandra Storage Load" \
- --vertical-label="Bytes" \
- DEF:val1={rrd1}:strgLoad:AVERAGE \
- AREA:val1#babdb6 \
- LINE1.5:val1#888a85:"Storage Load " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.Storage.Exceptions.name=Cassandra Exceptions
-report.cassandra.metrics.Storage.Exceptions.columns=strgExceptions
-report.cassandra.metrics.Storage.Exceptions.type=interfaceSnmp
-report.cassandra.metrics.Storage.Exceptions.command=--title="Cassandra Exceptions" \
- --vertical-label="Exceptions" \
- DEF:val1={rrd1}:strgExceptions:AVERAGE \
- LINE1.5:val1#3465a4:"Unhandled Exceptions " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.DroppedMessages.name=Cassandra Dropped Messages
-report.cassandra.metrics.DroppedMessages.columns=drpdMsgRead, drpdMsgReadRepair, drpdMsgReqResp, drpdMsgRangeSlice, drpdMsgMutation
-report.cassandra.metrics.DroppedMessages.type=interfaceSnmp
-report.cassandra.metrics.DroppedMessages.command=--title="Cassandra Dropped Messages" \
- --vertical-label="Dropped Messages" \
- DEF:val1={rrd1}:drpdMsgRead:AVERAGE \
- DEF:val2={rrd2}:drpdMsgReadRepair:AVERAGE \
- DEF:val3={rrd3}:drpdMsgReqResp:AVERAGE \
- DEF:val4={rrd4}:drpdMsgRangeSlice:AVERAGE \
- DEF:val5={rrd5}:drpdMsgMutation:AVERAGE \
- LINE1.5:val1#f57900:"Read " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#cc0000:"Read Repair " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#4e9a06:"Request Response " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val4#3465a4:"Range Slice " \
- GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val5#5c3566:"Message Mutation " \
- GPRINT:val5:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val5:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val5:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.ThreadPools.internal.MemtableFlushWriter.name=Cassandra Thread Pool Memtable Flush Writer
-report.cassandra.metrics.ThreadPools.internal.MemtableFlushWriter.columns=tpIntMemTblFlsWrAt, tpIntMemTblFlsWrCbt
-report.cassandra.metrics.ThreadPools.internal.MemtableFlushWriter.type=interfaceSnmp
-report.cassandra.metrics.ThreadPools.internal.MemtableFlushWriter.command=--title="Cassandra Thread Pool Memtable Flush Writer" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:tpIntMemTblFlsWrAt:AVERAGE \
- DEF:val2={rrd2}:tpIntMemTblFlsWrCbt:AVERAGE \
- LINE1.5:val1#cc0000:"Active Tasks " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#f57900:"Currently Blocked Tasks " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.ThreadPools.internal.MemtablePostFlush.name=Cassandra Thread Pool Memtable Post Flush Writer
-report.cassandra.metrics.ThreadPools.internal.MemtablePostFlush.columns=tpIntMemTblPoFlsAt, tpIntMemTblPoFlsCbt, tpIntMemTblPoFlsPt
-report.cassandra.metrics.ThreadPools.internal.MemtablePostFlush.type=interfaceSnmp
-report.cassandra.metrics.ThreadPools.internal.MemtablePostFlush.command=--title="Cassandra Thread Pool Memtable Post Flush Writer" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:tpIntMemTblPoFlsAt:AVERAGE \
- DEF:val2={rrd2}:tpIntMemTblPoFlsCbt:AVERAGE \
- DEF:val3={rrd3}:tpIntMemTblPoFlsPt:AVERAGE \
- LINE1.5:val1#cc0000:"Active Tasks " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#f57900:"Currently Blocked Tasks " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#3465a4:"Pending Tasks " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.ThreadPools.internal.AntiEntropyStage.name=Thread Pool Internal Anti-Entropy Stage
-report.cassandra.metrics.ThreadPools.internal.AntiEntropyStage.columns=tpIntAntiEntStgeAt, tpIntAntiEntStgeCbt, tpIntAntiEntStgePt, tpIntAntiEntStgeCt
-report.cassandra.metrics.ThreadPools.internal.AntiEntropyStage.type=interfaceSnmp
-report.cassandra.metrics.ThreadPools.internal.AntiEntropyStage.command=--title="Thread Pool Internal Anti-Entropy Stage" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:tpIntAntiEntStgeAt:AVERAGE \
- DEF:val2={rrd2}:tpIntAntiEntStgeCbt:AVERAGE \
- DEF:val3={rrd3}:tpIntAntiEntStgePt:AVERAGE \
- DEF:val4={rrd4}:tpIntAntiEntStgeCt:AVERAGE \
- LINE1.5:val1#cc0000:"Active Tasks " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#f57900:"Currently Blocked Tasks " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#3465a4:"Pending Tasks " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val4#4e9a06:"Completed Tasks " \
- GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.ThreadPools.internal.GossipStage.name=Thread Pool Internal Gossip Stage
-report.cassandra.metrics.ThreadPools.internal.GossipStage.columns=tpIntGosStgeAt, tpIntGosStgeCbt, tpIntGosStgePt, tpIntGosStgeCt
-report.cassandra.metrics.ThreadPools.internal.GossipStage.type=interfaceSnmp
-report.cassandra.metrics.ThreadPools.internal.GossipStage.command=--title="Thread Pool Internal Gossip Stage" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:tpIntGosStgeAt:AVERAGE \
- DEF:val2={rrd2}:tpIntGosStgeCbt:AVERAGE \
- DEF:val3={rrd3}:tpIntGosStgePt:AVERAGE \
- DEF:val4={rrd4}:tpIntGosStgeCt:AVERAGE \
- LINE1.5:val1#cc0000:"Active Tasks " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#f57900:"Currently Blocked Tasks " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#3465a4:"Pending Tasks " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val4#4e9a06:"Completed Tasks " \
- GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.ThreadPools.internal.MigrationStage.name=Thread Pool Internal Migration Stage
-report.cassandra.metrics.ThreadPools.internal.MigrationStage.columns=tpIntMigStgeAt, tpIntMigStgeCbt, tpIntMigStgePt, tpIntMigStgeCt
-report.cassandra.metrics.ThreadPools.internal.MigrationStage.type=interfaceSnmp
-report.cassandra.metrics.ThreadPools.internal.MigrationStage.command=--title="Thread Pool Internal Migration Stage" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:tpIntMigStgeAt:AVERAGE \
- DEF:val2={rrd2}:tpIntMigStgeCbt:AVERAGE \
- DEF:val3={rrd3}:tpIntMigStgePt:AVERAGE \
- DEF:val4={rrd4}:tpIntMigStgeCt:AVERAGE \
- LINE1.5:val1#cc0000:"Active Tasks " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#f57900:"Currently Blocked Tasks " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#3465a4:"Pending Tasks " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val4#4e9a06:"Completed Tasks " \
- GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.ThreadPools.internal.MiscStage.name=Thread Pool Internal Misc Stage
-report.cassandra.metrics.ThreadPools.internal.MiscStage.columns=tpIntMiscStgeAt, tpIntMiscStgeCbt, tpIntMiscStgePt, tpIntMiscStgeCt
-report.cassandra.metrics.ThreadPools.internal.MiscStage.type=interfaceSnmp
-report.cassandra.metrics.ThreadPools.internal.MiscStage.command=--title="Thread Pool Internal Misc Stage" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:tpIntMiscStgeAt:AVERAGE \
- DEF:val2={rrd2}:tpIntMiscStgeCbt:AVERAGE \
- DEF:val3={rrd3}:tpIntMiscStgePt:AVERAGE \
- DEF:val4={rrd4}:tpIntMiscStgeCt:AVERAGE \
- LINE1.5:val1#cc0000:"Active Tasks " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#f57900:"Currently Blocked Tasks " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#3465a4:"Pending Tasks " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val4#4e9a06:"Completed Tasks " \
- GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.ThreadPools.MutationStage.name=Thread Pool Mutation Stage
-report.cassandra.metrics.ThreadPools.MutationStage.columns=tpMutStgeAt, tpMutStgeCbt, tpMutStgePt, tpMutStgeCt
-report.cassandra.metrics.ThreadPools.MutationStage.type=interfaceSnmp
-report.cassandra.metrics.ThreadPools.MutationStage.command=--title="Thread Pool Mutation Stage" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:tpMutStgeAt:AVERAGE \
- DEF:val2={rrd2}:tpMutStgeCbt:AVERAGE \
- DEF:val3={rrd3}:tpMutStgePt:AVERAGE \
- DEF:val4={rrd4}:tpMutStgeCt:AVERAGE \
- LINE1.5:val1#cc0000:"Active Tasks " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#f57900:"Currently Blocked Tasks " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#3465a4:"Pending Tasks " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val4#4e9a06:"Completed Tasks " \
- GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.ThreadPools.request.ReadStage.name=Thread Pool Read Stage
-report.cassandra.metrics.ThreadPools.request.ReadStage.columns=tpReadStageAt, tpReadStageCbt, tpReadStagePt, tpReadStageCt
-report.cassandra.metrics.ThreadPools.request.ReadStage.type=interfaceSnmp
-report.cassandra.metrics.ThreadPools.request.ReadStage.command=--title="Thread Pool Read Stage" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:tpReadStageAt:AVERAGE \
- DEF:val2={rrd2}:tpReadStageCbt:AVERAGE \
- DEF:val3={rrd3}:tpReadStagePt:AVERAGE \
- DEF:val4={rrd4}:tpReadStageCt:AVERAGE \
- LINE1.5:val1#cc0000:"Active Tasks " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#f57900:"Currently Blocked Tasks " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#3465a4:"Pending Tasks " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val4#4e9a06:"Completed Tasks " \
- GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.ThreadPools.RequestResponseStage.name=Thread Pool Request Response Stage
-report.cassandra.metrics.ThreadPools.RequestResponseStage.columns=tpReqRespStgeAt, tpReqRespStgeCbt, tpReqRespStgePt, tpReqRespStgeCt
-report.cassandra.metrics.ThreadPools.RequestResponseStage.type=interfaceSnmp
-report.cassandra.metrics.ThreadPools.RequestResponseStage.command=--title="Thread Pool Request Response Stage" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:tpReqRespStgeAt:AVERAGE \
- DEF:val2={rrd2}:tpReqRespStgeCbt:AVERAGE \
- DEF:val3={rrd3}:tpReqRespStgePt:AVERAGE \
- DEF:val4={rrd4}:tpReqRespStgeCt:AVERAGE \
- LINE1.5:val1#cc0000:"Active Tasks " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#f57900:"Currently Blocked Tasks " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#3465a4:"Pending Tasks " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val4#4e9a06:"Completed Tasks " \
- GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.ThreadPools.ReadRepairStage.name=Thread Pool Read Repair Stage
-report.cassandra.metrics.ThreadPools.ReadRepairStage.columns=tpReadRepairStgeAt, tpReadRepairStgeCbt, tpReadRepairStgePt, tpReadRepairStgeCt
-report.cassandra.metrics.ThreadPools.ReadRepairStage.type=interfaceSnmp
-report.cassandra.metrics.ThreadPools.ReadRepairStage.command=--title="Thread Pool Read Repair Stage" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:tpReadRepairStgeAt:AVERAGE \
- DEF:val2={rrd2}:tpReadRepairStgeCbt:AVERAGE \
- DEF:val3={rrd3}:tpReadRepairStgePt:AVERAGE \
- DEF:val4={rrd4}:tpReadRepairStgeCt:AVERAGE \
- LINE1.5:val1#cc0000:"Active Tasks " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#f57900:"Currently Blocked Tasks " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#3465a4:"Pending Tasks " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val4#4e9a06:"Completed Tasks " \
- GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val4:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val4:MAX:" Max \\: %8.2lf %s\\n"
-
-
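Each of the cassandra21x reports deleted above follows the four-key pattern used throughout these graph files: a display name, the data-source columns, a resource type, and an rrdtool graph command in which {rrd1}, {rrd2}, ... stand for the storage files backing the columns, in the order they are listed. A minimal sketch of the pattern (the report id and column name are illustrative, not taken from the shipped files):

report.example.threadpool.name=Example Thread Pool
report.example.threadpool.columns=exampleActiveTasks
report.example.threadpool.type=interfaceSnmp
report.example.threadpool.command=--title="Example Thread Pool" \
 --vertical-label="Tasks" \
 DEF:val1={rrd1}:exampleActiveTasks:AVERAGE \
 LINE1.5:val1#cc0000:"Active Tasks" \
 GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s\\n"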
diff --git a/snmp-graph.properties.d/cassandra21x-newts-graph.properties b/snmp-graph.properties.d/cassandra21x-newts-graph.properties
deleted file mode 100644
index 427ce1a..0000000
--- a/snmp-graph.properties.d/cassandra21x-newts-graph.properties
+++ /dev/null
@@ -1,196 +0,0 @@
-reports=cassandra.metrics.keyspace.newts.AllMemtables.DataSize, \
-cassandra.metrics.keyspace.newts.Memtables.Switch.Counter, \
-cassandra.metrics.keyspace.newts.Memtables.Columns.Counter, \
-cassandra.metrics.keyspace.newts.Memtable.DataSize, \
-cassandra.metrics.keyspace.newts.rwLatency, \
-cassandra.metrics.keyspace.newts.RangeLatency.99th, \
-cassandra.metrics.keyspace.newts.Latency, \
-cassandra.metrics.keyspace.newts.Bloom.Disk, \
-cassandra.metrics.keyspace.newts.Bloom.Memory, \
-cassandra.metrics.keyspace.newts.MemoryUsed, \
-cassandra.metrics.keyspace.newts.pending, \
-cassandra.metrics.keyspace.newts.DiskSpace
-
-report.cassandra.metrics.keyspace.newts.AllMemtables.DataSize.name=All Memtables Data Size
-report.cassandra.metrics.keyspace.newts.AllMemtables.DataSize.columns=alMemTblLiDaSi, alMemTblOffHeapDaSi, alMemTblOnHeapDaSi
-report.cassandra.metrics.keyspace.newts.AllMemtables.DataSize.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.AllMemtables.DataSize.command=--title="All Memtables Data Size" \
- --vertical-label="Bytes" \
- DEF:val1={rrd1}:alMemTblLiDaSi:AVERAGE \
- DEF:val2={rrd2}:alMemTblOffHeapDaSi:AVERAGE \
- DEF:val3={rrd3}:alMemTblOnHeapDaSi:AVERAGE \
- LINE1.5:val1#3465a4:"Live Data Size " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#75507b:"Off-Heap Data Size " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#c17d11:"On-Heap Data Size " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.Memtables.Switch.Counter.name=All Memtables Switch Counter
-report.cassandra.metrics.keyspace.newts.Memtables.Switch.Counter.columns=memTblSwitchCount
-report.cassandra.metrics.keyspace.newts.Memtables.Switch.Counter.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.Memtables.Switch.Counter.command=--title="All Memtables Switch Counter" \
- --vertical-label="Number of Times" \
- DEF:val1={rrd1}:memTblSwitchCount:AVERAGE \
- LINE1.5:val1#3465a4:"Switch Counter " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.Memtables.Columns.Counter.name=All Memtables Columns Counter
-report.cassandra.metrics.keyspace.newts.Memtables.Columns.Counter.columns=memTblColumnsCnt
-report.cassandra.metrics.keyspace.newts.Memtables.Columns.Counter.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.Memtables.Columns.Counter.command=--title="All Memtables Columns Counter" \
- --vertical-label="Columns" \
- DEF:val1={rrd1}:memTblColumnsCnt:AVERAGE \
- LINE1.5:val1#3465a4:"Columns " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.Memtable.DataSize.name=Newts Memtable Data Size
-report.cassandra.metrics.keyspace.newts.Memtable.DataSize.columns=memTblLiveDaSi, memTblOffHeapDaSi, memTblOnHeapDaSi
-report.cassandra.metrics.keyspace.newts.Memtable.DataSize.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.Memtable.DataSize.command=--title="Newts Memtable Data Size" \
- --vertical-label="Bytes" \
- DEF:val1={rrd1}:memTblLiveDaSi:AVERAGE \
- DEF:val2={rrd2}:memTblOffHeapDaSi:AVERAGE \
- DEF:val3={rrd3}:memTblOnHeapDaSi:AVERAGE \
- LINE1.5:val1#3465a4:"Live Data Size " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#75507b:"Off-Heap Data Size " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val3#c17d11:"On-Heap Data Size " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.rwLatency.name=Newts Read and Write Latency
-report.cassandra.metrics.keyspace.newts.rwLatency.columns=readTotLtncy, writeTotLtncy
-report.cassandra.metrics.keyspace.newts.rwLatency.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.rwLatency.command=--title="Newts Read and Write Latency" \
- --vertical-label="micro seconds" \
- DEF:val1={rrd1}:readTotLtncy:AVERAGE \
- DEF:val2={rrd2}:writeTotLtncy:AVERAGE \
- LINE1.5:val1#73d216:"Read Total Latency " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#3465a4:"Write Total Latency " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.RangeLatency.99th.name=Newts Range Latency 99th Percentile
-report.cassandra.metrics.keyspace.newts.RangeLatency.99th.columns=rangeLtncy99
-report.cassandra.metrics.keyspace.newts.RangeLatency.99th.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.RangeLatency.99th.command=--title="Newts Range Latency 99th Percentile" \
- --vertical-label="micro seconds" \
- DEF:val1={rrd1}:rangeLtncy99:AVERAGE \
- LINE1.5:val1#3465a4:"Range Latency 99 Percentile " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.Latency.name=Newts Latency
-report.cassandra.metrics.keyspace.newts.Latency.columns=casCommitTotLtncy, casPrepareTotLtncy, casProposeTotLtncy
-report.cassandra.metrics.keyspace.newts.Latency.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.Latency.command=--title="Newts Latency" \
- --vertical-label="micro seconds" \
- DEF:val1={rrd1}:casCommitTotLtncy:AVERAGE \
- DEF:val2={rrd2}:casPrepareTotLtncy:AVERAGE \
- DEF:val3={rrd3}:casProposeTotLtncy:AVERAGE \
- LINE1.5:val1#f57900:"Commit Total Latency " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- STACK:val2#3465a4:"Prepare Total Latency " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n" \
- STACK:val3#75507b:"Propose Total Latency " \
- GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val3:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val3:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.Bloom.Disk.name=Bloom Filter Disk Usage
-report.cassandra.metrics.keyspace.newts.Bloom.Disk.columns=blmFltrDskSpcUsed
-report.cassandra.metrics.keyspace.newts.Bloom.Disk.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.Bloom.Disk.command=--title="Bloom Filter Disk Usage" \
- --vertical-label="Bytes" \
- DEF:val1={rrd1}:blmFltrDskSpcUsed:AVERAGE \
- AREA:val1#babdb6 \
- LINE1.5:val1#888a85:"Disk Space Used " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.Bloom.Memory.name=Bloom Filter Memory Usage
-report.cassandra.metrics.keyspace.newts.Bloom.Memory.columns=blmFltrOffHeapMemUs
-report.cassandra.metrics.keyspace.newts.Bloom.Memory.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.Bloom.Memory.command=--title="Bloom Filter Memory Usage" \
- --vertical-label="Bytes" \
- DEF:val1={rrd1}:blmFltrOffHeapMemUs:AVERAGE \
- AREA:val1#babdb6 \
- LINE1.5:val1#888a85:"Off-Heap Memory Used " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.MemoryUsed.name=Newts Memory Used
-report.cassandra.metrics.keyspace.newts.MemoryUsed.columns=cmpMetaOffHeapMemUs, idxSumOffHeapMemUs
-report.cassandra.metrics.keyspace.newts.MemoryUsed.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.MemoryUsed.command=--title="Newts Memory Used" \
- --vertical-label="Bytes" \
- DEF:val1={rrd1}:cmpMetaOffHeapMemUs:AVERAGE \
- DEF:val2={rrd2}:idxSumOffHeapMemUs:AVERAGE \
- LINE1.5:val1#f57900:"Compression Metadata Off-Heap Memory Used " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#3465a4:"Index Summary Off-Heap Memory Used " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.pending.name=Newts Pending
-report.cassandra.metrics.keyspace.newts.pending.columns=pendingCompactions, pendingFlushes
-report.cassandra.metrics.keyspace.newts.pending.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.pending.command=--title="Newts Pending" \
- --vertical-label="Tasks" \
- DEF:val1={rrd1}:pendingCompactions:AVERAGE \
- DEF:val2={rrd2}:pendingFlushes:AVERAGE \
- LINE1.5:val1#f57900:"Pending Compactions " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#3465a4:"Pending Flushes " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
-
-report.cassandra.metrics.keyspace.newts.DiskSpace.name=Newts Disk Space
-report.cassandra.metrics.keyspace.newts.DiskSpace.columns=totalDiskSpaceUsed, liveDiskSpaceUsed
-report.cassandra.metrics.keyspace.newts.DiskSpace.type=interfaceSnmp
-report.cassandra.metrics.keyspace.newts.DiskSpace.command=--title="Newts Disk Space" \
- --vertical-label="Bytes" \
- DEF:val1={rrd1}:totalDiskSpaceUsed:AVERAGE \
- DEF:val2={rrd2}:liveDiskSpaceUsed:AVERAGE \
- LINE1.5:val1#f57900:"Total Disk Space Used " \
- GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val1:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val1:MAX:" Max \\: %8.2lf %s\\n" \
- LINE1.5:val2#3465a4:"Live Disk Space Used " \
- GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \
- GPRINT:val2:MIN:" Min \\: %8.2lf %s" \
- GPRINT:val2:MAX:" Max \\: %8.2lf %s\\n"
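That removes the last of the cassandra21x-newts keyspace graphs. One rrdtool idiom from the deleted Latency report deserves a note: LINE draws an independent series, while STACK draws each following series on top of the previous one, so commit, prepare and propose latency render as a cumulative band rather than three overlapping lines. Schematically, with the column names from the removed report:

 DEF:val1={rrd1}:casCommitTotLtncy:AVERAGE \
 DEF:val2={rrd2}:casPrepareTotLtncy:AVERAGE \
 LINE1.5:val1#f57900:"Commit (baseline)" \
 STACK:val2#3465a4:"Prepare (stacked on Commit)"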
diff --git a/snmp-graph.properties.d/checkpoint-graph.properties b/snmp-graph.properties.d/checkpoint-graph.properties
index 985a0a9..522f82d 100644
--- a/snmp-graph.properties.d/checkpoint-graph.properties
+++ b/snmp-graph.properties.d/checkpoint-graph.properties
@@ -38,7 +38,7 @@ report.checkpoint.pktsAccepted.command=--title="Packets Accepted" \
LINE2:accepted#0000ff:"Accepted" \
GPRINT:accepted:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:accepted:MIN:"Min \\: %8.2lf %s" \
- GPRINT:accepted:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:accepted:MAX:"Max \\: %8.2lf %s\\n"
report.checkpoint.pktsDropped.name=Packets Dropped (CheckPoint)
report.checkpoint.pktsDropped.columns=pktsDropped
@@ -49,7 +49,7 @@ report.checkpoint.pktsDropped.command=--title="Packets Dropped" \
LINE2:dropped#0000ff:"Dropped" \
GPRINT:dropped:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:dropped:MIN:"Min \\: %8.2lf %s" \
- GPRINT:dropped:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:dropped:MAX:"Max \\: %8.2lf %s\\n"
report.checkpoint.pktsLogged.name=Packets Logged (CheckPoint)
report.checkpoint.pktsLogged.columns=pktsLogged
@@ -60,7 +60,7 @@ report.checkpoint.pktsLogged.command=--title="Packets Logged" \
LINE2:logged#0000ff:"Logged" \
GPRINT:logged:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:logged:MIN:"Min \\: %8.2lf %s" \
- GPRINT:logged:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:logged:MAX:"Max \\: %8.2lf %s\\n"
report.checkpoint.pktsRejected.name=Packets Rejected (CheckPoint)
report.checkpoint.pktsRejected.columns=pktsRejected
@@ -71,7 +71,7 @@ report.checkpoint.pktsRejected.command=--title="Packets Rejected" \
LINE2:rejected#0000ff:"Rejected" \
GPRINT:rejected:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:rejected:MIN:"Min \\: %8.2lf %s" \
- GPRINT:rejected:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:rejected:MAX:"Max \\: %8.2lf %s\\n"
report.checkpoint.pktsEncrypted.name=Packets Encrypted and Decrypted (CheckPoint)
report.checkpoint.pktsEncrypted.columns=cpvEncPackets, cpvDecPackets
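This checkpoint hunk is the first of a series of small cleanups that runs through the rest of the diff. Here, and again in the clavister, f5, ipunity and jboss files below, the final GPRINT of a command loses a stray trailing backslash: in a .properties file a backslash at the end of a line is a continuation marker, so the old form left the logical line open past the last directive and survived only because a blank separator line happened to follow; without that blank line the next report definition would be folded silently into the command handed to rrdtool. The cisco, ejn, juniper and ciscoNexus hunks where a line is apparently replaced by an identical line are the same kind of cleanup, just invisible: they seem to differ only in trailing whitespace. Before and after, using the hunk above:

# 18.0.4: logical line kept open by the trailing backslash
 GPRINT:accepted:MAX:"Max \\: %8.2lf %s\\n" \

# 19.0.0: command cleanly terminated
 GPRINT:accepted:MAX:"Max \\: %8.2lf %s\\n"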
diff --git a/snmp-graph.properties.d/cisco-graph.properties b/snmp-graph.properties.d/cisco-graph.properties
index 3c28ddb..1bf9fd1 100644
--- a/snmp-graph.properties.d/cisco-graph.properties
+++ b/snmp-graph.properties.d/cisco-graph.properties
@@ -702,7 +702,7 @@ report.cisco.css.hits.command=--title="Hits per Second" --height 150 --width 600
LINE1:fixedHits#0000ff:"Hits " \
GPRINT:fixedHits:AVERAGE:"Avg\\: %8.2lf %s" \
GPRINT:fixedHits:MIN:"Min\\: %8.2lf %s" \
- GPRINT:fixedHits:MAX:"Max\\: %8.2lf %s\\n"
+ GPRINT:fixedHits:MAX:"Max\\: %8.2lf %s\\n"
report.cisco.css.bytes.name=CSS Bytes
report.cisco.css.bytes.height=150
@@ -716,7 +716,7 @@ report.cisco.css.bytes.command=--title="Bytes per Second" --height 150 --width 6
LINE1:Bytes#0000ff:"Bytes/second " \
GPRINT:Bytes:AVERAGE:"Avg\\: %8.2lf %s" \
GPRINT:Bytes:MIN:"Min\\: %8.2lf %s" \
- GPRINT:Bytes:MAX:"Max\\: %8.2lf %s\\n"
+ GPRINT:Bytes:MAX:"Max\\: %8.2lf %s\\n"
report.cisco.css.redirects.name=CSS Redirects
report.cisco.css.redirects.height=150
@@ -729,7 +729,7 @@ report.cisco.css.redirects.command=--title="Redirects per Second" --height 150 -
LINE1:Redirs#0000ff:"Redirects " \
GPRINT:Redirs:AVERAGE:"Avg\\: %8.2lf %s" \
GPRINT:Redirs:MIN:"Min\\: %8.2lf %s" \
- GPRINT:Redirs:MAX:"Max\\: %8.2lf %s\\n"
+ GPRINT:Redirs:MAX:"Max\\: %8.2lf %s\\n"
report.cisco.css.sorries.name=CSS Sorries
report.cisco.css.sorries.height=150
@@ -742,7 +742,7 @@ report.cisco.css.sorries.command=--title="Sorries per Second" --height 150 --wid
LINE1:Redirs#0000ff:"Sorries " \
GPRINT:Redirs:AVERAGE:"Avg\\: %8.2lf %s" \
GPRINT:Redirs:MIN:"Min\\: %8.2lf %s" \
- GPRINT:Redirs:MAX:"Max\\: %8.2lf %s\\n"
+ GPRINT:Redirs:MAX:"Max\\: %8.2lf %s\\n"
report.cisco.docs.macchan.name=Cable Modem (MacChannel) Users
report.cisco.docs.macchan.columns=MacCmTotal,MacCmActive,MacCmReg
diff --git a/snmp-graph.properties.d/ciscoNexus-graph.properties b/snmp-graph.properties.d/ciscoNexus-graph.properties
index b98e341..b465dd9 100644
--- a/snmp-graph.properties.d/ciscoNexus-graph.properties
+++ b/snmp-graph.properties.d/ciscoNexus-graph.properties
@@ -142,4 +142,5 @@ report.cisco.nexus.env.command=--title="Environmental Status of {entPhysicalName
LINE1:entSensor#f57900:"Temperature" \
GPRINT:entSensor:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:entSensor:MIN:"Min \\: %8.2lf %s" \
- GPRINT:entSensor:MAX:"Max \\: %8.2lf %s\\n"
+ GPRINT:entSensor:MAX:"Max \\: %8.2lf %s\\n"
+
diff --git a/snmp-graph.properties.d/clavister-graph.properties b/snmp-graph.properties.d/clavister-graph.properties
index c8f87b0..0269e7a 100644
--- a/snmp-graph.properties.d/clavister-graph.properties
+++ b/snmp-graph.properties.d/clavister-graph.properties
@@ -294,7 +294,7 @@ report.clavister.ipsec.info.command=--title="Clavister IPsec Informational Excha
LINE2:clvIPsecInfoFailed#a40000:"Failed" \
GPRINT:clvIPsecInfoFailed:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:clvIPsecInfoFailed:MIN:"Min \\: %8.2lf %s" \
- GPRINT:clvIPsecInfoFailed:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:clvIPsecInfoFailed:MAX:"Max \\: %8.2lf %s\\n"
report.clavister.ipsec.bits.name=Clavister IPsec Bits In/Out
report.clavister.ipsec.bits.columns=clvIPsecInOctComp,clvIPsecInOctUncomp,clvIPsecOutOctComp,clvIPsecOutOctUncom
@@ -798,7 +798,7 @@ report.clavister.vlan.untaggedpkts.command=--title="Clavister VLAN Untagged Pack
COMMENT:" Total" \
GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:total:MIN:"Min \\: %8.2lf %s" \
- GPRINT:total:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:total:MAX:"Max \\: %8.2lf %s\\n"
report.clavister.vlan.untaggedocts.name=Clavister VLAN Untagged Bytes
report.clavister.vlan.untaggedocts.columns=clvIfVlUntInOctets,clvIfVlUntOutOctets,clvIfVlUntTotOctets
@@ -823,7 +823,7 @@ report.clavister.vlan.untaggedocts.command=--title="Clavister VLAN Untagged Byte
COMMENT:" Total" \
GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:total:MIN:"Min \\: %8.2lf %s" \
- GPRINT:total:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:total:MAX:"Max \\: %8.2lf %s\\n"
#
#clvDHCPRelayRuleTable
@@ -1172,7 +1172,7 @@ report.clavister.system.connpersec.command=--title="Clavister System Connections
LINE1:closedInv#3465a4:"Closed" \
GPRINT:closed:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:closed:MIN:"Min \\: %8.2lf %s" \
- GPRINT:closed:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:closed:MAX:"Max \\: %8.2lf %s\\n"
report.clavister.system.hcfwdbits.name=Clavister System Bits Forwarded (High Speed)
report.clavister.system.hcfwdbits.columns=clvSysHCFwdBits
diff --git a/snmp-graph.properties.d/ejn-graph.properties b/snmp-graph.properties.d/ejn-graph.properties
index 72e96f6..ecb0bce 100644
--- a/snmp-graph.properties.d/ejn-graph.properties
+++ b/snmp-graph.properties.d/ejn-graph.properties
@@ -80,7 +80,7 @@ report.ejnggsn.apn.users.command=--title="APN {ApnName} Active PDP Contexts" \
LINE2:active#0000ff:"PDP Contexts" \
GPRINT:active:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:active:MIN:"Min \\: %8.2lf %s" \
- GPRINT:active:MAX:"Max \\: %8.2lf %s\\n"
+ GPRINT:active:MAX:"Max \\: %8.2lf %s\\n"
report.ejnggsn.apn.ippool.name=GGSN APN Available IPs
report.ejnggsn.apn.ippool.columns=ApnFreePoolIps
diff --git a/snmp-graph.properties.d/elasticsearch-graph.properties b/snmp-graph.properties.d/elasticsearch-graph.properties
new file mode 100644
index 0000000..2731291
--- /dev/null
+++ b/snmp-graph.properties.d/elasticsearch-graph.properties
@@ -0,0 +1,581 @@
+reports=elasticsearch.cluster.indices.count, \
+ elasticsearch.cluster.indices.shards.index.shards, \
+ elasticsearch.cluster.indices.shards.index.primaries, \
+ elasticsearch.cluster.indices.shards.index.replication, \
+ elasticsearch.cluster.indices.docs, \
+ elasticsearch.cluster.indices.store, \
+ elasticsearch.cluster.indices.throttle.time, \
+ elasticsearch.cluster.indices.fielddata.size, \
+ elasticsearch.cluster.indices.fielddata.evictions, \
+ elasticsearch.cluster.indices.filtercache.size, \
+ elasticsearch.cluster.indices.filtercache.evictions, \
+ elasticsearch.cluster.indices.idcache.size, \
+ elasticsearch.cluster.indices.completion.size, \
+ elasticsearch.cluster.indices.segments.count, \
+ elasticsearch.cluster.indices.segments.memory, \
+ elasticsearch.cluster.indices.perculate.total, \
+ elasticsearch.cluster.indices.perculate.time, \
+ elasticsearch.cluster.indices.perculate.current, \
+ elasticsearch.cluster.indices.perculate.size, \
+ elasticsearch.cluster.indices.perculate.queries, \
+ elasticsearch.cluster.nodes, \
+ elasticsearch.cluster.nodes.os.processors, \
+ elasticsearch.cluster.nodes.os.memory.total, \
+ elasticsearch.cluster.nodes.os.cpu, \
+ elasticsearch.cluster.nodes.open.filedescriptors, \
+ elasticsearch.cluster.node.jvm.uptime, \
+ elasticsearch.cluster.node.jvm.memory, \
+ elasticsearch.cluster.node.jvm.threads, \
+ elasticsearch.cluster.node.fs.size, \
+ elasticsearch.cluster.node.fs.dsk.io.ops, \
+ elasticsearch.cluster.node.fs.dsk.io.size, \
+ elasticsearch.cluster.node.fs.dsk.queue, \
+ elasticsearch.cluster.node.fs.dsk.svc.time
+
+report.elasticsearch.cluster.indices.count.name=ES Cluster Indices
+report.elasticsearch.cluster.indices.count.columns=indicesCount
+report.elasticsearch.cluster.indices.count.type=nodeSnmp
+report.elasticsearch.cluster.indices.count.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.count.command=--title="ES Cluster Indices: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Count" \
+ DEF:val1={rrd1}:indicesCount:AVERAGE \
+ AREA:val1#babdb6 \
+ LINE2:val1#888a85:"Indices " \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
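Every report in this new Elasticsearch file shares two keys worth calling out: type=nodeSnmp attaches the graphs to the node-level resource, and propertiesValues=clusterName exposes the stored clusterName string property to the command, where {clusterName} is substituted into the title (the same mechanism other files in this diff use for placeholders like {sensName} and {fgWebCacheDisk}). A minimal sketch of the pattern, with an illustrative report id and column name:

report.example.es.name=Example ES Metric
report.example.es.columns=exampleDocCount
report.example.es.type=nodeSnmp
report.example.es.propertiesValues=clusterName
report.example.es.command=--title="Example ES Metric: {clusterName}" \
 --vertical-label="Count" \
 DEF:val1={rrd1}:exampleDocCount:AVERAGE \
 LINE2:val1#3465a4:"Documents" \
 GPRINT:val1:AVERAGE:"Avg \\: %10.2lf\\n"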
+report.elasticsearch.cluster.indices.shards.index.shards.name=ES Cluster Index Shards
+report.elasticsearch.cluster.indices.shards.index.shards.columns=shardsMin, shardsMax, shardsAvg
+report.elasticsearch.cluster.indices.shards.index.shards.type=nodeSnmp
+report.elasticsearch.cluster.indices.shards.index.shards.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.shards.index.shards.command=--title="ES Cluster Index Shards: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Shards" \
+ DEF:val1={rrd1}:shardsMin:AVERAGE \
+ DEF:val2={rrd2}:shardsMax:AVERAGE \
+ DEF:val3={rrd3}:shardsAvg:AVERAGE \
+ COMMENT:"Index Shards\\n" \
+ LINE2:val1#ad7fa8:"Min " \
+ GPRINT:val1:MIN:"%10.2lf\\n" \
+ LINE2:val2#729fcf:"Max " \
+ GPRINT:val2:MAX:"%10.2lf\\n" \
+ LINE2:val3#c17d11:"Avg " \
+ GPRINT:val3:AVERAGE:"%10.2lf\\n"
+
+report.elasticsearch.cluster.indices.shards.index.primaries.name=ES Cluster Index Primaries
+report.elasticsearch.cluster.indices.shards.index.primaries.columns=primariesMin, primariesMax, primariesAvg
+report.elasticsearch.cluster.indices.shards.index.primaries.type=nodeSnmp
+report.elasticsearch.cluster.indices.shards.index.primaries.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.shards.index.primaries.command=--title="ES Cluster Index Primaries: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Primaries" \
+ DEF:val1={rrd1}:primariesMin:AVERAGE \
+ DEF:val2={rrd2}:primariesMax:AVERAGE \
+ DEF:val3={rrd3}:primariesAvg:AVERAGE \
+ COMMENT:"Index Primaries\\n" \
+ LINE2:val1#ad7fa8:"Min " \
+ GPRINT:val1:MIN:"%10.2lf\\n" \
+ LINE2:val2#729fcf:"Max " \
+ GPRINT:val2:MAX:"%10.2lf\\n" \
+ LINE2:val3#c17d11:"Avg " \
+ GPRINT:val3:AVERAGE:"%10.2lf\\n"
+
+report.elasticsearch.cluster.indices.shards.index.replication.name=ES Cluster Index Replication
+report.elasticsearch.cluster.indices.shards.index.replication.columns=replicationMin, replicationMax, replicationAvg
+report.elasticsearch.cluster.indices.shards.index.replication.type=nodeSnmp
+report.elasticsearch.cluster.indices.shards.index.replication.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.shards.index.replication.command=--title="ES Cluster Index Replication: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Replication" \
+ DEF:val1={rrd1}:replicationMin:AVERAGE \
+ DEF:val2={rrd2}:replicationMax:AVERAGE \
+ DEF:val3={rrd3}:replicationAvg:AVERAGE \
+ COMMENT:"Index Replication\\n" \
+ LINE2:val1#ad7fa8:"Min " \
+ GPRINT:val1:MIN:"%10.2lf\\n" \
+ LINE2:val2#729fcf:"Max " \
+ GPRINT:val2:MAX:"%10.2lf\\n" \
+ LINE2:val3#c17d11:"Avg " \
+ GPRINT:val3:AVERAGE:"%10.2lf\\n"
+
+report.elasticsearch.cluster.indices.docs.name=ES Cluster Indices Documents
+report.elasticsearch.cluster.indices.docs.columns=docsCount, docsDeleted
+report.elasticsearch.cluster.indices.docs.type=nodeSnmp
+report.elasticsearch.cluster.indices.docs.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.docs.command=--title="ES Cluster Indices Documents: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Documents" \
+ DEF:val1={rrd1}:docsCount:AVERAGE \
+ DEF:val2={rrd2}:docsDeleted:AVERAGE \
+ LINE2:val1#729fcf:"Count " \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val2#cc0000:"Deleted " \
+ GPRINT:val2:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val2:MIN:"Min \\: %10.2lf" \
+ GPRINT:val2:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.store.name=ES Cluster Indices Store Size
+report.elasticsearch.cluster.indices.store.columns=storeSizeBytes
+report.elasticsearch.cluster.indices.store.type=nodeSnmp
+report.elasticsearch.cluster.indices.store.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.store.command=--title="ES Cluster Indices Store Size: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:storeSizeBytes:AVERAGE \
+ AREA:val1#729fcf \
+ LINE2:val1#3465a4:"Store Size" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.throttle.time.name=ES Cluster Indices Store Throttle Time
+report.elasticsearch.cluster.indices.throttle.time.columns=throttleTimeMillis
+report.elasticsearch.cluster.indices.throttle.time.type=nodeSnmp
+report.elasticsearch.cluster.indices.throttle.time.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.throttle.time.command=--title="ES Cluster Indices Store Throttle Time: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Milliseconds" \
+ DEF:val1={rrd1}:throttleTimeMillis:AVERAGE \
+ AREA:val1#ad7fa8 \
+ LINE2:val1#75507b:"Time in ms" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.fielddata.size.name=ES Cluster Indices Field Data Size
+report.elasticsearch.cluster.indices.fielddata.size.columns=memorySizeBytes
+report.elasticsearch.cluster.indices.fielddata.size.type=nodeSnmp
+report.elasticsearch.cluster.indices.fielddata.size.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.fielddata.size.command=--title="ES Cluster Indices Field Data Size: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:memorySizeBytes:AVERAGE \
+ AREA:val1#729fcf \
+ LINE2:val1#3465a4:"Field Data Size" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.fielddata.evictions.name=ES Cluster Indices Field Data Evictions
+report.elasticsearch.cluster.indices.fielddata.evictions.columns=memoryEvictions
+report.elasticsearch.cluster.indices.fielddata.evictions.type=nodeSnmp
+report.elasticsearch.cluster.indices.fielddata.evictions.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.fielddata.evictions.command=--title="ES Cluster Indices Field Data Evictions: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Evictions" \
+ DEF:val1={rrd1}:memoryEvictions:AVERAGE \
+ AREA:val1#e9b96e \
+ LINE2:val1#c17d11:"Field Data Evictions" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.filtercache.size.name=ES Cluster Indices Filter Cache Size
+report.elasticsearch.cluster.indices.filtercache.size.columns=fltrCacheBytes
+report.elasticsearch.cluster.indices.filtercache.size.type=nodeSnmp
+report.elasticsearch.cluster.indices.filtercache.size.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.filtercache.size.command=--title="ES Cluster Indices Filter Cache Size: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:fltrCacheBytes:AVERAGE \
+ AREA:val1#729fcf \
+ LINE2:val1#3465a4:"Filter Cache Size" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.filtercache.evictions.name=ES Cluster Indices Filter Cache Evictions
+report.elasticsearch.cluster.indices.filtercache.evictions.columns=fltrCacheEvictions
+report.elasticsearch.cluster.indices.filtercache.evictions.type=nodeSnmp
+report.elasticsearch.cluster.indices.filtercache.evictions.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.filtercache.evictions.command=--title="ES Cluster Indices Filter Cache Evictions: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Evictions" \
+ DEF:val1={rrd1}:fltrCacheEvictions:AVERAGE \
+ AREA:val1#e9b96e \
+ LINE2:val1#c17d11:"Filter Cache Evictions" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.idcache.size.name=ES Cluster Indices ID Cache Size
+report.elasticsearch.cluster.indices.idcache.size.columns=idCacheBytes
+report.elasticsearch.cluster.indices.idcache.size.type=nodeSnmp
+report.elasticsearch.cluster.indices.idcache.size.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.idcache.size.command=--title="ES Cluster Indices ID Cache Size: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:idCacheBytes:AVERAGE \
+ AREA:val1#729fcf \
+ LINE2:val1#3465a4:"ID Cache Size" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.completion.size.name=ES Cluster Indices Completion Size
+report.elasticsearch.cluster.indices.completion.size.columns=completionSizeBytes
+report.elasticsearch.cluster.indices.completion.size.type=nodeSnmp
+report.elasticsearch.cluster.indices.completion.size.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.completion.size.command=--title="ES Cluster Indices Completion Size: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:completionSizeBytes:AVERAGE \
+ AREA:val1#729fcf \
+ LINE2:val1#3465a4:"Completion Size" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.segments.count.name=ES Cluster Indices Segments Count
+report.elasticsearch.cluster.indices.segments.count.columns=segCount
+report.elasticsearch.cluster.indices.segments.count.type=nodeSnmp
+report.elasticsearch.cluster.indices.segments.count.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.segments.count.command=--title="ES Cluster Indices Segments Count: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Count" \
+ DEF:val1={rrd1}:segCount:AVERAGE \
+ AREA:val1#ad7fa8 \
+ LINE2:val1#75507b:"Segments" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.segments.memory.name=ES Cluster Indices Segments Memory
+report.elasticsearch.cluster.indices.segments.memory.columns=segMemory, segWriterMem, segWriterMemMax, segVerMapMem, segFixBitSetMem
+report.elasticsearch.cluster.indices.segments.memory.type=nodeSnmp
+report.elasticsearch.cluster.indices.segments.memory.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.segments.memory.command=--title="ES Cluster Indices Segments Memory: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:segMemory:AVERAGE \
+ DEF:val2={rrd2}:segWriterMem:AVERAGE \
+ DEF:val3={rrd3}:segWriterMemMax:AVERAGE \
+ DEF:val4={rrd4}:segVerMapMem:AVERAGE \
+ DEF:val5={rrd5}:segFixBitSetMem:AVERAGE \
+ LINE2:val1#75507b:"Memory " \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val2#729fcf:"Writer Memory " \
+ GPRINT:val2:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val2:MIN:"Min \\: %10.2lf" \
+ GPRINT:val2:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val3#204a87:"Writer Memory Max " \
+ GPRINT:val3:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val3:MIN:"Min \\: %10.2lf" \
+ GPRINT:val3:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val4#c17d11:"Version Map Memory " \
+ GPRINT:val4:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val4:MIN:"Min \\: %10.2lf" \
+ GPRINT:val4:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val5#888a85:"Fixed Bit Set Memory " \
+ GPRINT:val5:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val5:MIN:"Min \\: %10.2lf" \
+ GPRINT:val5:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.perculate.total.name=ES Cluster Indices Percolate Total
+report.elasticsearch.cluster.indices.perculate.total.columns=percTotal
+report.elasticsearch.cluster.indices.perculate.total.type=nodeSnmp
+report.elasticsearch.cluster.indices.perculate.total.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.perculate.total.command=--title="ES Cluster Indices Percolate Total: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Total" \
+ DEF:val1={rrd1}:percTotal:AVERAGE \
+ LINE2:val1#75507b:"Percolate " \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.perculate.time.name=ES Cluster Indices Percolate Time
+report.elasticsearch.cluster.indices.perculate.time.columns=percTimeInMillis
+report.elasticsearch.cluster.indices.perculate.time.type=nodeSnmp
+report.elasticsearch.cluster.indices.perculate.time.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.perculate.time.command=--title="ES Cluster Indices Percolate Time: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Milliseconds" \
+ DEF:val1={rrd1}:percTimeInMillis:AVERAGE \
+ AREA:val1#ad7fa8 \
+ LINE2:val1#75507b:"Time in ms" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.perculate.current.name=ES Cluster Indices Percolate Current
+report.elasticsearch.cluster.indices.perculate.current.columns=percCurrent
+report.elasticsearch.cluster.indices.perculate.current.type=nodeSnmp
+report.elasticsearch.cluster.indices.perculate.current.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.perculate.current.command=--title="ES Cluster Indices Percolate Current: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Current" \
+ DEF:val1={rrd1}:percCurrent:AVERAGE \
+ AREA:val1#ad7fa8 \
+ LINE2:val1#75507b:"Current" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.perculate.size.name=ES Cluster Indices Percolate Size
+report.elasticsearch.cluster.indices.perculate.size.columns=percMemSizeBytes
+report.elasticsearch.cluster.indices.perculate.size.type=nodeSnmp
+report.elasticsearch.cluster.indices.perculate.size.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.perculate.size.command=--title="ES Cluster Indices Percolate Size: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:percMemSizeBytes:AVERAGE \
+ AREA:val1#729fcf \
+ LINE2:val1#3465a4:"Size" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.indices.perculate.queries.name=ES Cluster Indices Percolate Queries
+report.elasticsearch.cluster.indices.perculate.queries.columns=percQueries
+report.elasticsearch.cluster.indices.perculate.queries.type=nodeSnmp
+report.elasticsearch.cluster.indices.perculate.queries.propertiesValues=clusterName
+report.elasticsearch.cluster.indices.perculate.queries.command=--title="ES Cluster Indices Percolate Queries: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Queries" \
+ DEF:val1={rrd1}:percQueries:AVERAGE \
+ LINE2:val1#75507b:"Queries" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.nodes.name=ES Cluster Nodes
+report.elasticsearch.cluster.nodes.columns=nodesCntTotal, nodesCntMstOnly, nodesCntDataOnly, nodesCntMasterData, nodesClient
+report.elasticsearch.cluster.nodes.type=nodeSnmp
+report.elasticsearch.cluster.nodes.propertiesValues=clusterName
+report.elasticsearch.cluster.nodes.command=--title="ES Cluster Nodes: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Nodes" \
+ DEF:val1={rrd1}:nodesCntTotal:AVERAGE \
+ DEF:val2={rrd2}:nodesCntMstOnly:AVERAGE \
+ DEF:val3={rrd3}:nodesCntDataOnly:AVERAGE \
+ DEF:val4={rrd4}:nodesCntMasterData:AVERAGE \
+ DEF:val5={rrd5}:nodesClient:AVERAGE \
+ LINE2:val1#75507b:"Total " \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val2#c17d11:"Master Only " \
+ GPRINT:val2:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val2:MIN:"Min \\: %10.2lf" \
+ GPRINT:val2:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val3#4e9a06:"Data Only " \
+ GPRINT:val3:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val3:MIN:"Min \\: %10.2lf" \
+ GPRINT:val3:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val4#3465a4:"Master Data " \
+ GPRINT:val4:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val4:MIN:"Min \\: %10.2lf" \
+ GPRINT:val4:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val5#888a85:"Clients " \
+ GPRINT:val5:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val5:MIN:"Min \\: %10.2lf" \
+ GPRINT:val5:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.nodes.os.processors.name=ES Cluster Nodes Processors
+report.elasticsearch.cluster.nodes.os.processors.columns=osAvailProc
+report.elasticsearch.cluster.nodes.os.processors.type=nodeSnmp
+report.elasticsearch.cluster.nodes.os.processors.propertiesValues=clusterName
+report.elasticsearch.cluster.nodes.os.processors.command=--title="ES Cluster Node Processors: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Processors" \
+ DEF:val1={rrd1}:osAvailProc:AVERAGE \
+ LINE2:val1#75507b:"Processors" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.nodes.os.memory.total.name=ES Cluster Nodes OS Memory Total
+report.elasticsearch.cluster.nodes.os.memory.total.columns=osMemTotalBytes
+report.elasticsearch.cluster.nodes.os.memory.total.type=nodeSnmp
+report.elasticsearch.cluster.nodes.os.memory.total.propertiesValues=clusterName
+report.elasticsearch.cluster.nodes.os.memory.total.command=--title="ES Cluster Node OS Memory Total: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:osMemTotalBytes:AVERAGE \
+ AREA:val1#729fcf \
+ LINE2:val1#3465a4:"OS Memory Total" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.nodes.os.cpu.name=ES Cluster Nodes Process CPU
+report.elasticsearch.cluster.nodes.os.cpu.columns=procCpuPercent
+report.elasticsearch.cluster.nodes.os.cpu.type=nodeSnmp
+report.elasticsearch.cluster.nodes.os.cpu.propertiesValues=clusterName
+report.elasticsearch.cluster.nodes.os.cpu.command=--title="ES Cluster Node Process CPU: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Percent" \
+ DEF:val1={rrd1}:procCpuPercent:AVERAGE \
+ AREA:val1#ad7fa8 \
+ LINE2:val1#75507b:"Process CPU" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.nodes.open.filedescriptors.name=ES Cluster Nodes Open File Descriptors
+report.elasticsearch.cluster.nodes.open.filedescriptors.columns=openFileDescMin, openFileDescMax, openFileDescAvg
+report.elasticsearch.cluster.nodes.open.filedescriptors.type=nodeSnmp
+report.elasticsearch.cluster.nodes.open.filedescriptors.propertiesValues=clusterName
+report.elasticsearch.cluster.nodes.open.filedescriptors.command=--title="ES Cluster Node Open File Descriptors: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Open File Descriptors" \
+ DEF:val1={rrd1}:openFileDescMin:AVERAGE \
+ DEF:val2={rrd2}:openFileDescMax:AVERAGE \
+ DEF:val3={rrd3}:openFileDescAvg:AVERAGE \
+ COMMENT:"Open File Descriptors\\n" \
+ LINE2:val1#ad7fa8:"Min " \
+ GPRINT:val1:MIN:"%10.2lf\\n" \
+ LINE2:val2#729fcf:"Max " \
+ GPRINT:val2:MAX:"%10.2lf\\n" \
+ LINE2:val3#c17d11:"Avg " \
+ GPRINT:val3:AVERAGE:"%10.2lf\\n"
+
+report.elasticsearch.cluster.node.jvm.uptime.name=ES Cluster Node JVM Uptime
+report.elasticsearch.cluster.node.jvm.uptime.columns=jvmMaxUptimeMillis
+report.elasticsearch.cluster.node.jvm.uptime.type=nodeSnmp
+report.elasticsearch.cluster.node.jvm.uptime.propertiesValues=clusterName
+report.elasticsearch.cluster.node.jvm.uptime.command=--title="ES Cluster Node JVM Uptime: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Milliseconds" \
+ DEF:val1={rrd1}:jvmMaxUptimeMillis:AVERAGE \
+ AREA:val1#ad7fa8 \
+ LINE2:val1#75507b:"Time in ms" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.node.jvm.memory.name=ES Cluster Node JVM Memory
+report.elasticsearch.cluster.node.jvm.memory.columns=jvmMemHeapBytes, jvmMemMaxBytes
+report.elasticsearch.cluster.node.jvm.memory.type=nodeSnmp
+report.elasticsearch.cluster.node.jvm.memory.propertiesValues=clusterName
+report.elasticsearch.cluster.node.jvm.memory.command=--title="ES Cluster Node JVM Memory: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:jvmMemHeapBytes:AVERAGE \
+ DEF:val2={rrd2}:jvmMemMaxBytes:AVERAGE \
+ LINE2:val1#f57900:"JVM Heap Used" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val2#cc0000:"JVM Heap Max " \
+ GPRINT:val2:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val2:MIN:"Min \\: %10.2lf" \
+ GPRINT:val2:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.node.jvm.threads.name=ES Cluster Node JVM Threads
+report.elasticsearch.cluster.node.jvm.threads.columns=jvmThreads
+report.elasticsearch.cluster.node.jvm.threads.type=nodeSnmp
+report.elasticsearch.cluster.node.jvm.threads.propertiesValues=clusterName
+report.elasticsearch.cluster.node.jvm.threads.command=--title="ES Cluster Node JVM Threads: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Threads" \
+ DEF:val1={rrd1}:jvmThreads:AVERAGE \
+ AREA:val1#ad7fa8 \
+ LINE2:val1#75507b:"JVM Threads" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.node.fs.size.name=ES Cluster Node File System Size
+report.elasticsearch.cluster.node.fs.size.columns=fsTotalBytes, fsFreeBytes, fsAvailBytes
+report.elasticsearch.cluster.node.fs.size.type=nodeSnmp
+report.elasticsearch.cluster.node.fs.size.propertiesValues=clusterName
+report.elasticsearch.cluster.node.fs.size.command=--title="ES Cluster File System Size: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:fsTotalBytes:AVERAGE \
+ DEF:val2={rrd2}:fsFreeBytes:AVERAGE \
+ DEF:val3={rrd3}:fsAvailBytes:AVERAGE \
+ LINE2:val1#3465a4:"File System Total " \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val2#4e9a06:"File System Free " \
+ GPRINT:val2:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val2:MIN:"Min \\: %10.2lf" \
+ GPRINT:val2:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val3#555753:"File System Available " \
+ GPRINT:val3:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val3:MIN:"Min \\: %10.2lf" \
+ GPRINT:val3:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.node.fs.dsk.io.ops.name=ES Cluster Node File System I/O Ops
+report.elasticsearch.cluster.node.fs.dsk.io.ops.columns=fsDskReads, fsDskWrites, fsDskIoOp
+report.elasticsearch.cluster.node.fs.dsk.io.ops.type=nodeSnmp
+report.elasticsearch.cluster.node.fs.dsk.io.ops.propertiesValues=clusterName
+report.elasticsearch.cluster.node.fs.dsk.io.ops.command=--title="ES Cluster File System I/O Ops: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="I/O Ops" \
+ DEF:val1={rrd1}:fsDskReads:AVERAGE \
+ DEF:val2={rrd2}:fsDskWrites:AVERAGE \
+ DEF:val3={rrd3}:fsDskIoOp:AVERAGE \
+ LINE2:val1#4e9a06:"Disk I/O Read Ops " \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val2#3465a4:"Disk I/O Write Ops" \
+ GPRINT:val2:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val2:MIN:"Min \\: %10.2lf" \
+ GPRINT:val2:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val3#f57900:"Disk I/O Ops Total" \
+ GPRINT:val3:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val3:MIN:"Min \\: %10.2lf" \
+ GPRINT:val3:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.node.fs.dsk.io.size.name=ES Cluster Node File System I/O Size
+report.elasticsearch.cluster.node.fs.dsk.io.size.columns=fsDskReadSizeBytes, fsDskWriteSizeBytes, fsDskIoSizeBytes
+report.elasticsearch.cluster.node.fs.dsk.io.size.type=nodeSnmp
+report.elasticsearch.cluster.node.fs.dsk.io.size.propertiesValues=clusterName
+report.elasticsearch.cluster.node.fs.dsk.io.size.command=--title="ES Cluster File System I/O Size: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Bytes" \
+ DEF:val1={rrd1}:fsDskReadSizeBytes:AVERAGE \
+ DEF:val2={rrd2}:fsDskWriteSizeBytes:AVERAGE \
+ DEF:val3={rrd3}:fsDskIoSizeBytes:AVERAGE \
+ LINE2:val1#4e9a06:"Disk I/O Read Size " \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val2#3465a4:"Disk I/O Write Size" \
+ GPRINT:val2:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val2:MIN:"Min \\: %10.2lf" \
+ GPRINT:val2:MAX:"Max \\: %10.2lf\\n" \
+ LINE2:val3#f57900:"Disk I/O Size Total" \
+ GPRINT:val3:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val3:MIN:"Min \\: %10.2lf" \
+ GPRINT:val3:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.node.fs.dsk.queue.name=ES Cluster Node File System Disk Queue
+report.elasticsearch.cluster.node.fs.dsk.queue.columns=fsDskQueue
+report.elasticsearch.cluster.node.fs.dsk.queue.type=nodeSnmp
+report.elasticsearch.cluster.node.fs.dsk.queue.propertiesValues=clusterName
+report.elasticsearch.cluster.node.fs.dsk.queue.command=--title="ES Cluster Node File System Disk Queue: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Queue" \
+ DEF:val1={rrd1}:fsDskQueue:AVERAGE \
+ AREA:val1#fcaf3e \
+ LINE2:val1#f57900:"Disk Queue" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
+
+report.elasticsearch.cluster.node.fs.dsk.svc.time.name=ES Cluster Node File System Disk Service Time
+report.elasticsearch.cluster.node.fs.dsk.svc.time.columns=fsDskSvcTime
+report.elasticsearch.cluster.node.fs.dsk.svc.time.type=nodeSnmp
+report.elasticsearch.cluster.node.fs.dsk.svc.time.propertiesValues=clusterName
+report.elasticsearch.cluster.node.fs.dsk.svc.time.command=--title="ES Cluster Node File System Disk Service Time: {clusterName}" \
+ --units-exponent=0 \
+ --vertical-label="Service Time" \
+ DEF:val1={rrd1}:fsDskSvcTime:AVERAGE \
+ AREA:val1#fcaf3e \
+ LINE2:val1#f57900:"Service Time" \
+ GPRINT:val1:AVERAGE:"Avg \\: %10.2lf" \
+ GPRINT:val1:MIN:"Min \\: %10.2lf" \
+ GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
\ No newline at end of file
diff --git a/snmp-graph.properties.d/f5-graph.properties b/snmp-graph.properties.d/f5-graph.properties
index 2e5ad73..c6ae650 100644
--- a/snmp-graph.properties.d/f5-graph.properties
+++ b/snmp-graph.properties.d/f5-graph.properties
@@ -130,7 +130,7 @@ report.bigip.lvsconns.command=--title="Virtual Server Current Connections (F5)"
LINE2:curConns#4e9a06:"Current " \
GPRINT:curConns:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:curConns:MIN:"Min \\: %8.2lf %s" \
- GPRINT:curConns:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:curConns:MAX:"Max \\: %8.2lf %s\\n"
report.bigip.nonodeerrs.name=LTM Virtual Server No Node Errors
report.bigip.nonodeerrs.columns=vsNoNodeErrs
diff --git a/snmp-graph.properties.d/fortinet-fortigate-application-v5.2-graph.properties b/snmp-graph.properties.d/fortinet-fortigate-application-v5.2-graph.properties
index 4cfc452..b5592d4 100644
--- a/snmp-graph.properties.d/fortinet-fortigate-application-v5.2-graph.properties
+++ b/snmp-graph.properties.d/fortinet-fortigate-application-v5.2-graph.properties
@@ -377,12 +377,12 @@ report.fortinet.fgApFTPStatsEntry.stats.command=--title="Fortigate FTP Proxy Sta
GPRINT:val1:MAX:"Max \\: %10.2lf\\n"
report.fortinet.fgApFTPStatsEntry.connection.name=Fortigate FTP Proxy Connections Statistics
-report.fortinet.fgApFTPStatsEntry.connection.columns=fgApFTPMaxConnections, fgApFTPConnections
+report.fortinet.fgApFTPStatsEntry.connection.columns=fgApFTPMaxConns, fgApFTPConns
report.fortinet.fgApFTPStatsEntry.connection.type=nodeSnmp
report.fortinet.fgApFTPStatsEntry.connection.command=--title="Fortigate FTP Proxy Connections Statistics" \
--vertical-label="number" \
- DEF:val1={rrd1}:fgApFTPMaxConnections:AVERAGE \
- DEF:val2={rrd2}:fgApFTPConnections:AVERAGE \
+ DEF:val1={rrd1}:fgApFTPMaxConns:AVERAGE \
+ DEF:val2={rrd2}:fgApFTPConns:AVERAGE \
LINE1:val1#cc0000:"max Connections" \
GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:val1:MIN:"Min \\: %8.2lf %s" \
@@ -410,13 +410,13 @@ report.fortinet.fgWebCacheDiskUsage.command=--title="Fortigate Web Cache Usage f
GPRINT:val2:MAX:"Max \\: %8.2lf %s\\n"
report.fortinet.fgWebChDskStsEntry.name=Fortigate Web Cache Disk Statistics
-report.fortinet.fgWebChDskStsEntry.columns=fgWebCacheDiskHits, fgWebCacheDiskMisses
+report.fortinet.fgWebChDskStsEntry.columns=fgWebCacheDiskHits, fgWebCacheDiskMiss
report.fortinet.fgWebChDskStsEntry.type=fgWebChDskStsEntry
report.fortinet.fgWebChDskStsEntry.propertiesValues=fgWebCacheDisk
report.fortinet.fgWebChDskStsEntry.command=--title="Fortigate Web Cache Disk Statistics for Disk: {fgWebCacheDisk}" \
--vertical-label="number" \
DEF:val1={rrd1}:fgWebCacheDiskHits:AVERAGE \
- DEF:val2={rrd2}:fgWebCacheDiskMisses:AVERAGE \
+ DEF:val2={rrd2}:fgWebCacheDiskMiss:AVERAGE \
AREA:val1#cc0000:"Hits " \
GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:val1:MIN:"Min \\: %8.2lf %s" \
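The two column renames in this Fortinet file are most plausibly about rrdtool's 19-character limit on data-source names, which the old names exceed (fgApFTPConnections already fits and appears to be shortened to fgApFTPConns simply to match). Counting it out:

# 18.0.4 names
fgApFTPMaxConnections   # 21 characters, over the 19-character DS limit
fgWebCacheDiskMisses    # 20 characters, over the 19-character DS limit
# 19.0.0 names
fgApFTPMaxConns         # 15 characters
fgWebCacheDiskMiss      # 18 characters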
diff --git a/snmp-graph.properties.d/hwg-graph.properties b/snmp-graph.properties.d/hwg-graph.properties
index 766074c..5fd10b5 100644
--- a/snmp-graph.properties.d/hwg-graph.properties
+++ b/snmp-graph.properties.d/hwg-graph.properties
@@ -36,13 +36,12 @@ report.sensTable.sensValue.command=--title="The Value Reported by {sensName} in
LINE2:var#000000:"Value" \
GPRINT:var:AVERAGE:"Avg\\: %8.2lf %s" \
GPRINT:var:MIN:"Min\\: %8.2lf %s" \
- GPRINT:var:MAX:"Max\\: %8.2lf %s\n" \
+ GPRINT:var:MAX:"Max\\: %8.2lf %s\\n" \
LINE2:max#A00000:"Max " \
GPRINT:max:AVERAGE:"Avg\\: %8.2lf %s" \
GPRINT:max:MIN:"Min\\: %8.2lf %s" \
- GPRINT:max:MAX:"Max\\: %8.2lf %s\n" \
+ GPRINT:max:MAX:"Max\\: %8.2lf %s\\n" \
LINE2:min#0000A0:"Min " \
GPRINT:min:AVERAGE:"Avg\\: %8.2lf %s" \
GPRINT:min:MIN:"Min\\: %8.2lf %s" \
- GPRINT:min:MAX:"Max\\: %8.2lf %s\n
-
+ GPRINT:min:MAX:"Max\\: %8.2lf %s\\n"
\ No newline at end of file
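The hwg change fixes a different escaping bug. The properties loader decodes a single \n inside a value into a real newline while reading the file, so the old format strings reached rrdtool already broken (the final one was also missing its closing quote). Doubling the backslash makes the loader emit a literal \n, which rrdtool then interprets as its own end-of-line marker, just as \\: protects the colons that would otherwise be taken as GPRINT field separators. Roughly:

# in the .properties source       value the graph command actually receives
Max\\: %8.2lf %s\n          ->    Max\: %8.2lf %s<real newline>   (broken)
Max\\: %8.2lf %s\\n         ->    Max\: %8.2lf %s\n               (intended)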
diff --git a/snmp-graph.properties.d/ipunity-graph.properties b/snmp-graph.properties.d/ipunity-graph.properties
index 6fa331f..e616566 100644
--- a/snmp-graph.properties.d/ipunity-graph.properties
+++ b/snmp-graph.properties.d/ipunity-graph.properties
@@ -589,7 +589,7 @@ report.ipunity.sip.methodDetail.command=--title="SIP Method Detail ({applDescrip
STACK:infoOutInv#ff7200:"INFO " \
GPRINT:infoOut:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:infoOut:MIN:" Min \\: %8.2lf %s" \
- GPRINT:infoOut:MAX:" Max \\: %8.2lf %s\\n" \
+ GPRINT:infoOut:MAX:" Max \\: %8.2lf %s\\n"
report.ipunity.sip.statusCodeDetail.name=SIP Status Detail (IP Unity)
report.ipunity.sip.statusCodeDetail.columns=ipuSIPInfoClsIn,ipuSIPInfoClsOut,ipuSIPSuccClsIn,ipuSIPSuccClsOut,ipuSIPRedirClsIn,ipuSIPRedirClsOut,ipuSIPReqFailClsIn,ipuSIPReqFailClsOut,ipuSIPSrvFailClsIn,ipuSIPSrvFailClsOut,ipuSIPGblFailClsIn,ipuSIPGblFailClsOut
diff --git a/snmp-graph.properties.d/jboss-graph.properties b/snmp-graph.properties.d/jboss-graph.properties
index 0eb2b2b..ba63953 100644
--- a/snmp-graph.properties.d/jboss-graph.properties
+++ b/snmp-graph.properties.d/jboss-graph.properties
@@ -119,7 +119,7 @@ report.jboss.grp.time.command=--title="HTTP Global Request Processor - Time" \
LINE2:proc#0000ff:"ProcessTime" \
GPRINT:proc:AVERAGE:" Avg \\: %6.2lf %s" \
GPRINT:proc:MIN:"Min \\: %6.2lf %s" \
- GPRINT:proc:MAX:"Max \\: %6.2lf %s\\n" \
+ GPRINT:proc:MAX:"Max \\: %6.2lf %s\\n"
report.jboss.http.tp.name=Http Thread Pool
report.jboss.http.tp.columns=BusyThreads, Threads
diff --git a/snmp-graph.properties.d/juniper-graph.properties b/snmp-graph.properties.d/juniper-graph.properties
index 6d69707..6684f3d 100644
--- a/snmp-graph.properties.d/juniper-graph.properties
+++ b/snmp-graph.properties.d/juniper-graph.properties
@@ -387,5 +387,5 @@ report.ive.connections.command=--title="Juniper IVE Users" \
LINE2:iveConcurrentUsers#ff0000:"iveConcurrentUsers" \
GPRINT:iveConcurrentUsers:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:iveConcurrentUsers:MIN:"Min \\: %8.2lf %s" \
- GPRINT:iveConcurrentUsers:MAX:"Max \\: %8.2lf %s\\n"
+ GPRINT:iveConcurrentUsers:MAX:"Max \\: %8.2lf %s\\n"
diff --git a/snmp-graph.properties.d/kafka-graph.properties b/snmp-graph.properties.d/kafka-graph.properties
new file mode 100644
index 0000000..7ebfc6c
--- /dev/null
+++ b/snmp-graph.properties.d/kafka-graph.properties
@@ -0,0 +1,581 @@
+reports=\
+kafka.syslogInBps, \
+kafka.syslogMps, \
+kafka.syslogOutBps, \
+kafka.trapsInBps, \
+kafka.trapsMps, \
+kafka.trapsOutBps, \
+kafka.activeControllers, \
+kafka.bytesInPerSec, \
+kafka.bytesOutPerSec, \
+kafka.isrExpandsPerSec, \
+kafka.isrShrinksPerSec, \
+kafka.leaderEps, \
+kafka.leaders, \
+kafka.localTimeConsumer, \
+kafka.localTimeFollower, \
+kafka.localTimeProduce, \
+kafka.msgInPerSec, \
+kafka.netProcAvgIdle, \
+kafka.offlinePartitions, \
+kafka.partitions, \
+kafka.purgatoryFetch, \
+kafka.purgatoryProduce, \
+kafka.queueTimeConsumer, \
+kafka.queueTimeFollower, \
+kafka.queueTimeProduce, \
+kafka.remoteTimeConsumer, \
+kafka.remoteTimeFollower, \
+kafka.remoteTimeProduce, \
+kafka.replicaMaxLag, \
+kafka.reqHandAvgIdle, \
+kafka.reqSecConsumer, \
+kafka.reqSecFollower, \
+kafka.reqSecProduce, \
+kafka.sendTimeConsumer, \
+kafka.sendTimeFollower, \
+kafka.sendTimeProduce, \
+kafka.totalTimeConsumer, \
+kafka.totalTimeFollower, \
+kafka.totalTimeProduce, \
+kafka.uncleanLeaderEps, \
+kafka.underReplPart
+
+
+report.kafka.syslogInBps.name=Syslog Bytes In Per Second
+report.kafka.syslogInBps.columns=syslogInBps
+report.kafka.syslogInBps.type=interfaceSnmp
+report.kafka.syslogInBps.command=--title="Syslog Bytes In Per Second" \
+ --vertical-label="Bytes per second" \
+ DEF:value={rrd1}:syslogInBps:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Bytes In" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.syslogOutBps.name=Syslog Bytes Out Per Second
+report.kafka.syslogOutBps.columns=syslogOutBps
+report.kafka.syslogOutBps.type=interfaceSnmp
+report.kafka.syslogOutBps.command=--title="Syslog Bytes Out Per Second" \
+ --vertical-label="Bytes per second" \
+ DEF:value={rrd1}:syslogOutBps:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Bytes Out" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.syslogMps.name=Syslog Messages In Per Second
+report.kafka.syslogMps.columns=syslogMps
+report.kafka.syslogMps.type=interfaceSnmp
+report.kafka.syslogMps.command=--title="Syslog Messages In Per Second" \
+ --vertical-label="Messages per second" \
+ DEF:value={rrd1}:syslogMps:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Messages" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.trapsInBps.name=Trap Bytes In Per Second
+report.kafka.trapsInBps.columns=trapsInBps
+report.kafka.trapsInBps.type=interfaceSnmp
+report.kafka.trapsInBps.command=--title="Trap Bytes In Per Second" \
+ --vertical-label="Bytes per second" \
+ DEF:value={rrd1}:trapsInBps:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Bytes In" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.trapsOutBps.name=Trap Bytes Out Per Second
+report.kafka.trapsOutBps.columns=trapsOutBps
+report.kafka.trapsOutBps.type=interfaceSnmp
+report.kafka.trapsOutBps.command=--title="Trap Bytes Out Per Second" \
+ --vertical-label="Bytes per second" \
+ DEF:value={rrd1}:trapsOutBps:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Bytes Out" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.trapsMps.name=Trap Messages In Per Second
+report.kafka.trapsMps.columns=trapsMps
+report.kafka.trapsMps.type=interfaceSnmp
+report.kafka.trapsMps.command=--title="Trap Messages In Per Second" \
+ --vertical-label="Messages per second" \
+ DEF:value={rrd1}:trapsMps:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Messages" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+
+
+report.kafka.msgInPerSec.name=Messages In Per Second
+report.kafka.msgInPerSec.columns=msgInPerSec
+report.kafka.msgInPerSec.type=interfaceSnmp
+report.kafka.msgInPerSec.command=--title="Messages In Per Second" \
+ --vertical-label="Messages per second" \
+ DEF:value={rrd1}:msgInPerSec:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Messages In Per Second" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.bytesInPerSec.name=Bytes In Per Second
+report.kafka.bytesInPerSec.columns=bytesInPerSec
+report.kafka.bytesInPerSec.type=interfaceSnmp
+report.kafka.bytesInPerSec.command=--title="Bytes In Per Second" \
+ --vertical-label="Bytes per second" \
+ DEF:value={rrd1}:bytesInPerSec:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Bytes In Per Second" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.bytesOutPerSec.name=Bytes Out Per Second
+report.kafka.bytesOutPerSec.columns=bytesOutPerSec
+report.kafka.bytesOutPerSec.type=interfaceSnmp
+report.kafka.bytesOutPerSec.command=--title="Bytes Out Per Second" \
+ --vertical-label="Bytes per second" \
+ DEF:value={rrd1}:bytesOutPerSec:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Bytes Out Per Second" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+
+report.kafka.underReplPart.name=Under-Replicated Partitions
+report.kafka.underReplPart.columns=underReplPart
+report.kafka.underReplPart.type=interfaceSnmp
+report.kafka.underReplPart.command=--title="Under-Replicated Partitions" \
+ --vertical-label="Partitions" \
+ DEF:value={rrd1}:underReplPart:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Under-Replicated Partitions" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+
+report.kafka.activeControllers.name=Active Controllers
+report.kafka.activeControllers.columns=activeControllers
+report.kafka.activeControllers.type=interfaceSnmp
+report.kafka.activeControllers.command=--title="Active Controllers" \
+ --vertical-label="Controllers" \
+ DEF:value={rrd1}:activeControllers:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Active Controllers" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.offlinePartitions.name=Offline Partitions
+report.kafka.offlinePartitions.columns=offlinePartitions
+report.kafka.offlinePartitions.type=interfaceSnmp
+report.kafka.offlinePartitions.command=--title="Offline Partitions" \
+ --vertical-label="Partitions" \
+ DEF:value={rrd1}:offlinePartitions:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Offline Partitions" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+
+report.kafka.leaderEps.name=Leader Election Rate and Time
+report.kafka.leaderEps.columns=leaderEps
+report.kafka.leaderEps.type=interfaceSnmp
+report.kafka.leaderEps.command=--title="Leader Election Rate and Time" \
+ --vertical-label="Elections per second" \
+ DEF:value={rrd1}:leaderEps:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Elections per second" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.uncleanLeaderEps.name=Unclean Leader Elections Per Second
+report.kafka.uncleanLeaderEps.columns=uncleanLeaderEps
+report.kafka.uncleanLeaderEps.type=interfaceSnmp
+report.kafka.uncleanLeaderEps.command=--title="Unclean Leader Elections Per Second" \
+ --vertical-label="Elections per second" \
+ DEF:value={rrd1}:uncleanLeaderEps:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Elections per second" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+
+report.kafka.partitions.name=Partitions
+report.kafka.partitions.columns=partitions
+report.kafka.partitions.type=interfaceSnmp
+report.kafka.partitions.command=--title="Partitions" \
+ --vertical-label="Partitions" \
+ DEF:value={rrd1}:partitions:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Partitions" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.leaders.name=Leaders
+report.kafka.leaders.columns=leaders
+report.kafka.leaders.type=interfaceSnmp
+report.kafka.leaders.command=--title="Leaders" \
+ --vertical-label="Leaders" \
+ DEF:value={rrd1}:leaders:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Leaders" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+
+report.kafka.isrShrinksPerSec.name=In-Sync Replica Shrinks Per Second
+report.kafka.isrShrinksPerSec.columns=isrShrinksPerSec
+report.kafka.isrShrinksPerSec.type=interfaceSnmp
+report.kafka.isrShrinksPerSec.command=--title="In-Sync Replica Shrinks Per Second" \
+ --vertical-label="Shrinks per second" \
+ DEF:value={rrd1}:isrShrinksPerSec:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Shrinks per second" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.isrExpandsPerSec.name=In-Sync Replica Expansions Per Second
+report.kafka.isrExpandsPerSec.columns=isrExpandsPerSec
+report.kafka.isrExpandsPerSec.type=interfaceSnmp
+report.kafka.isrExpandsPerSec.command=--title="In-Sync Replica Expansions Per Second" \
+ --vertical-label="Expansions per second" \
+ DEF:value={rrd1}:isrExpandsPerSec:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Expansions per second" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+
+report.kafka.replicaMaxLag.name=Maximum Lag Between Replicas
+report.kafka.replicaMaxLag.columns=replicaMaxLag
+report.kafka.replicaMaxLag.type=interfaceSnmp
+report.kafka.replicaMaxLag.command=--title="Maximum Lag Between Replicas" \
+ --vertical-label="Messages" \
+ DEF:value={rrd1}:replicaMaxLag:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Maximum Lag" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+
+report.kafka.purgatoryProduce.name=Purgatory Size: Produce
+report.kafka.purgatoryProduce.columns=purgatoryProduce
+report.kafka.purgatoryProduce.type=interfaceSnmp
+report.kafka.purgatoryProduce.command=--title="Purgatory Size: Produce" \
+ --vertical-label="Requests" \
+ DEF:value={rrd1}:purgatoryProduce:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Producer requests" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.purgatoryFetch.name=Purgatory Size: Fetch
+report.kafka.purgatoryFetch.columns=purgatoryFetch
+report.kafka.purgatoryFetch.type=interfaceSnmp
+report.kafka.purgatoryFetch.command=--title="Purgatory Size: Fetch" \
+ --vertical-label="Requests" \
+ DEF:value={rrd1}:purgatoryFetch:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Fetch requests" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+
+# NOTE: This report reverses the value from idle percentage to busy percentage
+# The original value is between 0 and 1.
+#
+report.kafka.netProcAvgIdle.name=Network Processor Average Busy Percentage
+report.kafka.netProcAvgIdle.columns=netProcAvgIdle
+report.kafka.netProcAvgIdle.type=interfaceSnmp
+report.kafka.netProcAvgIdle.command=--title="Network Processor Average Busy Percentage" \
+ --vertical-label="Percent Busy" \
+ DEF:value={rrd1}:netProcAvgIdle:AVERAGE \
+ CDEF:percent=1,value,-,100,* \
+ AREA:percent#edd400 \
+ LINE2:percent#c4a000:"Percent Busy" \
+ GPRINT:percent:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:percent:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:percent:MAX:" Max \\: %8.2lf %s\\n"
+
+
+# NOTE: This report reverses the value from idle percentage to busy percentage.
+# The original value is between 0 and 1.
+#
+report.kafka.reqHandAvgIdle.name=Request Handler Average Busy Percentage
+report.kafka.reqHandAvgIdle.columns=reqHandAvgIdle
+report.kafka.reqHandAvgIdle.type=interfaceSnmp
+report.kafka.reqHandAvgIdle.command=--title="Request Handler Average Busy Percentage" \
+ --vertical-label="Percent Busy" \
+ DEF:value={rrd1}:reqHandAvgIdle:AVERAGE \
+ CDEF:percent=1,value,-,100,* \
+ AREA:percent#edd400 \
+ LINE2:percent#c4a000:"Percent Busy" \
+ GPRINT:percent:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:percent:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:percent:MAX:" Max \\: %8.2lf %s\\n"
+
+
+# reqSec*
+report.kafka.reqSecProduce.name=Requests per Second: Produce
+report.kafka.reqSecProduce.columns=reqSecProduce
+report.kafka.reqSecProduce.type=interfaceSnmp
+report.kafka.reqSecProduce.command=--title="Requests per Second: Produce" \
+ --vertical-label="Requests per second" \
+ DEF:value={rrd1}:reqSecProduce:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Requests per Second" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.reqSecConsumer.name=Requests per Second: FetchConsumer
+report.kafka.reqSecConsumer.columns=reqSecConsumer
+report.kafka.reqSecConsumer.type=interfaceSnmp
+report.kafka.reqSecConsumer.command=--title="Requests per Second: FetchConsumer" \
+ --vertical-label="Requests per second" \
+ DEF:value={rrd1}:reqSecConsumer:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Requests per Second" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.reqSecFollower.name=Requests per Second: FetchFollower
+report.kafka.reqSecFollower.columns=reqSecFollower
+report.kafka.reqSecFollower.type=interfaceSnmp
+report.kafka.reqSecFollower.command=--title="Requests per Second: FetchFollower" \
+ --vertical-label="Requests per second" \
+ DEF:value={rrd1}:reqSecFollower:AVERAGE \
+ AREA:value#edd400 \
+ LINE2:value#c4a000:"Requests per Second" \
+ GPRINT:value:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:value:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:value:MAX:" Max \\: %8.2lf %s\\n"
+
+
+# totalTime*
+report.kafka.totalTimeProduce.name=Total Time: Produce
+report.kafka.totalTimeProduce.columns=totalTimeProduce
+report.kafka.totalTimeProduce.type=interfaceSnmp
+report.kafka.totalTimeProduce.command=--title="Total Time: Produce" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:totalTimeProduce:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Total Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.totalTimeConsumer.name=Total Time: FetchConsumer
+report.kafka.totalTimeConsumer.columns=totalTimeConsumer
+report.kafka.totalTimeConsumer.type=interfaceSnmp
+report.kafka.totalTimeConsumer.command=--title="Total Time: FetchConsumer" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:totalTimeConsumer:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Total Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.totalTimeFollower.name=Total Time: FetchFollower
+report.kafka.totalTimeFollower.columns=totalTimeFollower
+report.kafka.totalTimeFollower.type=interfaceSnmp
+report.kafka.totalTimeFollower.command=--title="Total Time: FetchFollower" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:totalTimeFollower:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Total Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+
+# queueTime*
+report.kafka.queueTimeProduce.name=Queue Time: Produce
+report.kafka.queueTimeProduce.columns=queueTimeProduce
+report.kafka.queueTimeProduce.type=interfaceSnmp
+report.kafka.queueTimeProduce.command=--title="Queue Time: Produce" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:queueTimeProduce:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Queue Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.queueTimeConsumer.name=Queue Time: FetchConsumer
+report.kafka.queueTimeConsumer.columns=queueTimeConsumer
+report.kafka.queueTimeConsumer.type=interfaceSnmp
+report.kafka.queueTimeConsumer.command=--title="Queue Time: FetchConsumer" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:queueTimeConsumer:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Queue Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.queueTimeFollower.name=Queue Time: FetchFollower
+report.kafka.queueTimeFollower.columns=queueTimeFollower
+report.kafka.queueTimeFollower.type=interfaceSnmp
+report.kafka.queueTimeFollower.command=--title="Queue Time: FetchFollower" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:queueTimeFollower:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Queue Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+
+# localTime*
+report.kafka.localTimeProduce.name=Local Time: Produce
+report.kafka.localTimeProduce.columns=localTimeProduce
+report.kafka.localTimeProduce.type=interfaceSnmp
+report.kafka.localTimeProduce.command=--title="Local Time: Produce" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:localTimeProduce:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Local Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.localTimeConsumer.name=Local Time: FetchConsumer
+report.kafka.localTimeConsumer.columns=localTimeConsumer
+report.kafka.localTimeConsumer.type=interfaceSnmp
+report.kafka.localTimeConsumer.command=--title="Local Time: FetchConsumer" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:localTimeConsumer:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Local Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.localTimeFollower.name=Local Time: FetchFollower
+report.kafka.localTimeFollower.columns=localTimeFollower
+report.kafka.localTimeFollower.type=interfaceSnmp
+report.kafka.localTimeFollower.command=--title="Local Time: FetchFollower" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:localTimeFollower:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Local Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+
+# remoteTime*
+report.kafka.remoteTimeProduce.name=Remote Time: Produce
+report.kafka.remoteTimeProduce.columns=remoteTimeProduce
+report.kafka.remoteTimeProduce.type=interfaceSnmp
+report.kafka.remoteTimeProduce.command=--title="Remote Time: Produce" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:remoteTimeProduce:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Remote Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.remoteTimeConsumer.name=Remote Time: FetchConsumer
+report.kafka.remoteTimeConsumer.columns=remoteTimeConsumer
+report.kafka.remoteTimeConsumer.type=interfaceSnmp
+report.kafka.remoteTimeConsumer.command=--title="Remote Time: FetchConsumer" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:remoteTimeConsumer:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Remote Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.remoteTimeFollower.name=Remote Time: FetchFollower
+report.kafka.remoteTimeFollower.columns=remoteTimeFollower
+report.kafka.remoteTimeFollower.type=interfaceSnmp
+report.kafka.remoteTimeFollower.command=--title="Remote Time: FetchFollower" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:remoteTimeFollower:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Remote Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+
+# sendTime*
+report.kafka.sendTimeProduce.name=Response Send Time: Produce
+report.kafka.sendTimeProduce.columns=sendTimeProduce
+report.kafka.sendTimeProduce.type=interfaceSnmp
+report.kafka.sendTimeProduce.command=--title="Response Send Time: Produce" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:sendTimeProduce:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Response Send Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.sendTimeConsumer.name=Response Send Time: FetchConsumer
+report.kafka.sendTimeConsumer.columns=sendTimeConsumer
+report.kafka.sendTimeConsumer.type=interfaceSnmp
+report.kafka.sendTimeConsumer.command=--title="Response Send Time: FetchConsumer" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:sendTimeConsumer:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Response Send Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
+
+report.kafka.sendTimeFollower.name=Response Send Time: FetchFollower
+report.kafka.sendTimeFollower.columns=sendTimeFollower
+report.kafka.sendTimeFollower.type=interfaceSnmp
+report.kafka.sendTimeFollower.command=--title="Response Send Time: FetchFollower" \
+ --vertical-label="Seconds" \
+ DEF:value={rrd1}:sendTimeFollower:AVERAGE \
+ CDEF:seconds=value,1000,/ \
+ AREA:seconds#edd400 \
+ LINE2:seconds#c4a000:"Response Send Time" \
+ GPRINT:seconds:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:seconds:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:seconds:MAX:" Max \\: %8.2lf %s\\n"
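Two RPN idioms recur in the new Kafka reports above: CDEF:seconds=value,1000,/ rescales the millisecond request timers to seconds, and CDEF:percent=1,value,-,100,* inverts the 0-to-1 idle fraction into a busy percentage, as the NOTE comments in the file call out. Worked through in postfix order, an idle reading of 0.85 graphs as (1 - 0.85) * 100 = 15 percent busy, and a 250 ms timer plots as 250 / 1000 = 0.25 s.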
diff --git a/snmp-graph.properties.d/lmsensors-graph.properties b/snmp-graph.properties.d/lmsensors-graph.properties
index 36e9350..1fa6869 100644
--- a/snmp-graph.properties.d/lmsensors-graph.properties
+++ b/snmp-graph.properties.d/lmsensors-graph.properties
@@ -20,7 +20,7 @@ report.lmsensors.temp.command=--title="Temperature on {lms-tempdevice}" \
LINE1:btemp#f57900:"Temperature\\:" \
GPRINT:btemp:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:btemp:MIN:"Min \\: %8.2lf %s" \
- GPRINT:btemp:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:btemp:MAX:"Max \\: %8.2lf %s\\n"
report.lmsensors.fan.name=lmSensors Fan Sensor
report.lmsensors.fan.columns=lms-fan
@@ -31,7 +31,7 @@ report.lmsensors.fan.command=--title="Fan Speed on {lms-fandevice}" \
LINE2:dfan#0000ff:"Fan Speed\\:" \
GPRINT:dfan:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:dfan:MIN:"Min \\: %8.2lf %s" \
- GPRINT:dfan:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:dfan:MAX:"Max \\: %8.2lf %s\\n"
report.lmsensors.volt.name=lmSensors Volt Sensor
report.lmsensors.volt.columns=lms-volt
@@ -43,5 +43,5 @@ report.lmsensors.volt.command=--title="Volt on {lms-voltdevice}" \
LINE2:bvolt#0000ff:"Volt Speed\\:" \
GPRINT:bvolt:AVERAGE:" Avg \\: %8.2lf %s" \
GPRINT:bvolt:MIN:"Min \\: %8.2lf %s" \
- GPRINT:bvolt:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:bvolt:MAX:"Max \\: %8.2lf %s\\n"
diff --git a/snmp-graph.properties.d/microsoft-sql-graph.properties b/snmp-graph.properties.d/microsoft-sql-graph.properties
index 8651d39..f8af9d4 100644
--- a/snmp-graph.properties.d/microsoft-sql-graph.properties
+++ b/snmp-graph.properties.d/microsoft-sql-graph.properties
@@ -71,7 +71,7 @@ report.mssqlhitratios.command=--title="MSSQL Hit Ratios" \
LINE2:logcache#FF0000:"Log Cache \\:" \
GPRINT:logcache:AVERAGE:"Avg\\:%4.1lf" \
GPRINT:logcache:MAX:"Max\\:%4.1lf" \
- GPRINT:logcache:MIN:"Min\\:%4.1lf" \
+ GPRINT:logcache:MIN:"Min\\:%4.1lf"
report.mssqllockwaittime.name=MSSQL Lock Wait Time
report.mssqllockwaittime.columns=sqllockavgwaittime
diff --git a/snmp-graph.properties.d/mikrotik-graph.properties b/snmp-graph.properties.d/mikrotik-graph.properties
index 04c9332..8be5b49 100644
--- a/snmp-graph.properties.d/mikrotik-graph.properties
+++ b/snmp-graph.properties.d/mikrotik-graph.properties
@@ -91,7 +91,7 @@ report.mikrotik.wlstatrssi.command=--title="Wireless Station RSSI" \
AREA:rssi#00ff00:"SigLevel " \
GPRINT:rssi:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:rssi:MIN:"Min \\: %8.2lf %s" \
- GPRINT:rssi:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:rssi:MAX:"Max \\: %8.2lf %s\\n"
report.mikrotik.wlrtabrssi.name=Mikrotik Remote Station Signal Level
report.mikrotik.wlrtabrssi.columns=mtxrWlRtabStrength
@@ -102,7 +102,7 @@ report.mikrotik.wlrtabrssi.command=--title="Wireless Station RSSI" \
AREA:rssi#00ff00:"SigLevel " \
GPRINT:rssi:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:rssi:MIN:"Min \\: %8.2lf %s" \
- GPRINT:rssi:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:rssi:MAX:"Max \\: %8.2lf %s\\n"
report.mikrotik.wlrtabbit.name=Mikrotik Remote Wls Link Rate
report.mikrotik.wlrtabbit.columns=mtxrWlRtabRxRate,mtxrWlRtabTxRate
diff --git a/snmp-graph.properties.d/mysql-graph.properties b/snmp-graph.properties.d/mysql-graph.properties
index 639098c..986de80 100644
--- a/snmp-graph.properties.d/mysql-graph.properties
+++ b/snmp-graph.properties.d/mysql-graph.properties
@@ -116,7 +116,7 @@ report.mysql.slow.queries.command=--title="MySQL Slow Queries" \
LINE2:slow#0000ff:"Slow Queries" \
GPRINT:slow:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:slow:MIN:"Min \\: %8.2lf %s" \
- GPRINT:slow:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:slow:MAX:"Max \\: %8.2lf %s\\n"
report.mysql.queries.name=MySQL Queries
report.mysql.queries.columns=MyComDelete,MyComDeleteMulti,MyComInsert,MyComInsertSelect,MyComUpdate,MyComUpdateMulti,MyComSelect,MyQuestions
@@ -162,7 +162,7 @@ report.mysql.queries.command=--title="MySQL Queries" \
LINE2:questions#000000:"Questions " \
GPRINT:questions:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:questions:MIN:"Min \\: %8.2lf %s" \
- GPRINT:questions:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:questions:MAX:"Max \\: %8.2lf %s\\n"
report.mysql.key.reads.name=MySQL Key Reads
report.mysql.key.reads.columns=MyKeyReads,MyKeyReadReqs
@@ -178,7 +178,7 @@ report.mysql.key.reads.command=--title="MySQL Key Reads" \
LINE2:readreqs#000000:"Read Requests " \
GPRINT:readreqs:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:readreqs:MIN:"Min \\: %8.2lf %s" \
- GPRINT:readreqs:MAX:"Max \\: %8.2lf %s\\n" \
+ GPRINT:readreqs:MAX:"Max \\: %8.2lf %s\\n"
report.mysql.key.writes.name=MySQL Key Writes
report.mysql.key.writes.columns=MyKeyWrites,MyKeyWriteReqs
diff --git a/snmp-graph.properties.d/netapp-graph.properties b/snmp-graph.properties.d/netapp-graph.properties
index f005c16..d498ea1 100644
--- a/snmp-graph.properties.d/netapp-graph.properties
+++ b/snmp-graph.properties.d/netapp-graph.properties
@@ -232,12 +232,12 @@ report.netapp.sis.command=--title="NetApp {naDfFileSys} SIS percent savings" \
GPRINT:naSisPercent:MAX:"Max \\: %8.2lf %s\\n"
report.netapp.diskio.name=NetApp Disk IO
-report.netapp.diskio.columns=naMiscLowDiskReadBytes, naMiscLowDiskWriteBytes
+report.netapp.diskio.columns=naMscLowDiskRdBytes,naMscLowDiskWrBytes
report.netapp.diskio.type=nodeSnmp
report.netapp.diskio.command=--title="NetApp Disk IO Bytes" \
--vertical-label operations \
- DEF:naMiscLowDiskReadBytes={rrd1}:naMiscLowDiskReadBy:AVERAGE \
- DEF:naMiscLowDiskWriteBytes={rrd2}:naMiscLowDiskWriteB:AVERAGE \
+ DEF:naMiscLowDiskReadBytes={rrd1}:naMscLowDiskRdBytes:AVERAGE \
+ DEF:naMiscLowDiskWriteBytes={rrd2}:naMscLowDiskWrBytes:AVERAGE \
CDEF:naMiscLowDiskWriteBytesNeg=0,naMiscLowDiskWriteBytes,- \
LINE1:naMiscLowDiskReadBytes#0000ff:"IO reads Bytes" \
GPRINT:naMiscLowDiskReadBytes:AVERAGE:" Avg \\: %8.2lf %s" \
diff --git a/snmp-graph.properties.d/opennms-graph.properties b/snmp-graph.properties.d/opennms-graph.properties
index e915a2b..82356f9 100644
--- a/snmp-graph.properties.d/opennms-graph.properties
+++ b/snmp-graph.properties.d/opennms-graph.properties
@@ -164,7 +164,7 @@ report.onms.collectd.threadpool.command=--title="OpenNMS Collectd ThreadPool" \
LINE2:max#9A27F1:"Maximum Active" \
GPRINT:max:AVERAGE:"Avg\\: %5.0lf\" \
GPRINT:max:MIN:"Min\\: %5.0lf" \
- GPRINT:max:MAX:"Max\\: %5.0lf\\n" \
+ GPRINT:max:MAX:"Max\\: %5.0lf\\n"
report.onms.collectd.completedRatio.name=OpenNMS Collectd Task Completion Ratio
report.onms.collectd.completedRatio.columns=ONMSCollectTasksTot,ONMSCollectTasksCpt
diff --git a/snmp-graph.properties.d/opennms-minion-graph.properties b/snmp-graph.properties.d/opennms-minion-graph.properties
new file mode 100644
index 0000000..b01b4af
--- /dev/null
+++ b/snmp-graph.properties.d/opennms-minion-graph.properties
@@ -0,0 +1,271 @@
+reports=\
+OpenNMS.Minion.RPC.Server.Detect.Exchanges, \
+OpenNMS.Minion.RPC.Server.Detect.ProcessingTime, \
+OpenNMS.Minion.RPC.Server.DNS.Exchanges, \
+OpenNMS.Minion.RPC.Server.DNS.ProcessingTime, \
+OpenNMS.Minion.RPC.Server.PING.Exchanges, \
+OpenNMS.Minion.RPC.Server.PING.ProcessingTime, \
+OpenNMS.Minion.RPC.Server.PING-SWEEP.Exchanges, \
+OpenNMS.Minion.RPC.Server.PING-SWEEP.ProcessingTime, \
+OpenNMS.Minion.RPC.Server.Poller.Exchanges, \
+OpenNMS.Minion.RPC.Server.Poller.ProcessingTime, \
+OpenNMS.Minion.RPC.Server.SNMP.Exchanges, \
+OpenNMS.Minion.RPC.Server.SNMP.ProcessingTime, \
+OpenNMS.Minion.Syslogd.Listener.Exchanges
+
+
+###########################################
+## OpenNMS.REPORT_NAME.Exchanges
+###########################################
+#report.REPORT_NAME.name=METRIC Exchanges
+#report.REPORT_NAME.columns=METRICComplete, METRICFailed
+#report.REPORT_NAME.type=interfaceSnmp
+#report.REPORT_NAME.command=--title="METRIC Exchanges" \
+# --vertical-label="Messages per second" \
+# DEF:complete={rrd1}:METRICComplete:AVERAGE \
+# DEF:failed={rrd2}:METRICFailed:AVERAGE \
+# AREA:failed#EF343B:"Failed Messages " \
+# GPRINT:failed:AVERAGE:" Avg \\: %8.2lf %s" \
+# GPRINT:failed:MIN:" Min \\: %8.2lf %s" \
+# GPRINT:failed:MAX:" Max \\: %8.2lf %s\\n" \
+# STACK:complete#8DC63F:"Successful Messages" \
+# GPRINT:complete:AVERAGE:" Avg \\: %8.2lf %s" \
+# GPRINT:complete:MIN:" Min \\: %8.2lf %s" \
+# GPRINT:complete:MAX:" Max \\: %8.2lf %s\\n"
+
+
+###########################################
+## OpenNMS.Minion.RPC.Server.Detect.Exchanges
+###########################################
+report.OpenNMS.Minion.RPC.Server.Detect.Exchanges.name=Provisioning Detection Messages Received
+report.OpenNMS.Minion.RPC.Server.Detect.Exchanges.columns=DetectComplete, DetectFailed
+report.OpenNMS.Minion.RPC.Server.Detect.Exchanges.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.Detect.Exchanges.command=--title="Provisioning Detection Messages Received" \
+ --vertical-label="Messages per second" \
+ DEF:complete={rrd1}:DetectComplete:AVERAGE \
+ DEF:failed={rrd2}:DetectFailed:AVERAGE \
+ AREA:failed#EF343B:"Failed Messages " \
+ GPRINT:failed:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:failed:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:failed:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:complete#8DC63F:"Successful Messages" \
+ GPRINT:complete:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:complete:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:complete:MAX:" Max \\: %8.2lf %s\\n"
+
+###########################################
+## OpenNMS.Minion.RPC.Server.Detect.ProcessingTime
+###########################################
+report.OpenNMS.Minion.RPC.Server.Detect.ProcessingTime.name=Provisioning Detection Processing Time
+report.OpenNMS.Minion.RPC.Server.Detect.ProcessingTime.columns=DetectLastProc, DetectMeanProc
+report.OpenNMS.Minion.RPC.Server.Detect.ProcessingTime.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.Detect.ProcessingTime.command=--title="Provisioning Detection Processing Time" \
+ --vertical-label="Seconds per message" \
+ DEF:mqLast={rrd1}:DetectLastProc:AVERAGE \
+ DEF:mqMean={rrd2}:DetectMeanProc:AVERAGE \
+ CDEF:mqLastSec=mqLast,1000,/ \
+ AREA:mqLastSec#73d216:"Process via JMS" \
+ GPRINT:mqLastSec:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MAX:" Max \\: %8.2lf %s\\n"
+
+
+###########################################
+## OpenNMS.Minion.RPC.Server.DNS.Exchanges
+###########################################
+report.OpenNMS.Minion.RPC.Server.DNS.Exchanges.name=DNS Messages Received
+report.OpenNMS.Minion.RPC.Server.DNS.Exchanges.columns=DnsComplete, DnsFailed
+report.OpenNMS.Minion.RPC.Server.DNS.Exchanges.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.DNS.Exchanges.command=--title="DNS Messages Received" \
+ --vertical-label="Messages per second" \
+ DEF:complete={rrd1}:DnsComplete:AVERAGE \
+ DEF:failed={rrd2}:DnsFailed:AVERAGE \
+ AREA:failed#EF343B:"Failed Messages " \
+ GPRINT:failed:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:failed:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:failed:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:complete#8DC63F:"Successful Messages" \
+ GPRINT:complete:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:complete:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:complete:MAX:" Max \\: %8.2lf %s\\n"
+
+###########################################
+## OpenNMS.Minion.RPC.Server.DNS.ProcessingTime
+###########################################
+report.OpenNMS.Minion.RPC.Server.DNS.ProcessingTime.name=DNS Processing Time
+report.OpenNMS.Minion.RPC.Server.DNS.ProcessingTime.columns=DnsLastProc, DnsMeanProc
+report.OpenNMS.Minion.RPC.Server.DNS.ProcessingTime.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.DNS.ProcessingTime.command=--title="DNS Processing Time" \
+ --vertical-label="Seconds per message" \
+ DEF:mqLast={rrd1}:DnsLastProc:AVERAGE \
+ DEF:mqMean={rrd2}:DnsMeanProc:AVERAGE \
+ CDEF:mqLastSec=mqLast,1000,/ \
+ AREA:mqLastSec#73d216:"Process via JMS" \
+ GPRINT:mqLastSec:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MAX:" Max \\: %8.2lf %s\\n"
+
+
+###########################################
+## OpenNMS.Minion.RPC.Server.PING.Exchanges
+###########################################
+report.OpenNMS.Minion.RPC.Server.PING.Exchanges.name=Ping Messages Received
+report.OpenNMS.Minion.RPC.Server.PING.Exchanges.columns=PingComplete, PingFailed
+report.OpenNMS.Minion.RPC.Server.PING.Exchanges.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.PING.Exchanges.command=--title="Ping Messages Received" \
+ --vertical-label="Messages per second" \
+ DEF:complete={rrd1}:PingComplete:AVERAGE \
+ DEF:failed={rrd2}:PingFailed:AVERAGE \
+ AREA:failed#EF343B:"Failed Messages " \
+ GPRINT:failed:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:failed:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:failed:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:complete#8DC63F:"Successful Messages" \
+ GPRINT:complete:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:complete:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:complete:MAX:" Max \\: %8.2lf %s\\n"
+
+###########################################
+## OpenNMS.Minion.RPC.Server.PING.ProcessingTime
+###########################################
+report.OpenNMS.Minion.RPC.Server.PING.ProcessingTime.name=Ping Processing Time
+report.OpenNMS.Minion.RPC.Server.PING.ProcessingTime.columns=PingLastProc, PingMeanProc
+report.OpenNMS.Minion.RPC.Server.PING.ProcessingTime.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.PING.ProcessingTime.command=--title="Ping Processing Time" \
+ --vertical-label="Seconds per message" \
+ DEF:mqLast={rrd1}:PingLastProc:AVERAGE \
+ DEF:mqMean={rrd2}:PingMeanProc:AVERAGE \
+ CDEF:mqLastSec=mqLast,1000,/ \
+ AREA:mqLastSec#73d216:"Process via JMS" \
+ GPRINT:mqLastSec:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MAX:" Max \\: %8.2lf %s\\n"
+
+
+###########################################
+## OpenNMS.Minion.RPC.Server.PING-SWEEP.Exchanges
+###########################################
+report.OpenNMS.Minion.RPC.Server.PING-SWEEP.Exchanges.name=Ping Sweep Messages Received
+report.OpenNMS.Minion.RPC.Server.PING-SWEEP.Exchanges.columns=SweepComplete, SweepFailed
+report.OpenNMS.Minion.RPC.Server.PING-SWEEP.Exchanges.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.PING-SWEEP.Exchanges.command=--title="Ping Sweep Messages Received" \
+ --vertical-label="Messages per second" \
+ DEF:complete={rrd1}:SweepComplete:AVERAGE \
+ DEF:failed={rrd2}:SweepFailed:AVERAGE \
+ AREA:failed#EF343B:"Failed Messages " \
+ GPRINT:failed:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:failed:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:failed:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:complete#8DC63F:"Successful Messages" \
+ GPRINT:complete:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:complete:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:complete:MAX:" Max \\: %8.2lf %s\\n"
+
+###########################################
+## OpenNMS.Minion.RPC.Server.PING-SWEEP.ProcessingTime
+###########################################
+report.OpenNMS.Minion.RPC.Server.PING-SWEEP.ProcessingTime.name=Ping Sweep Processing Time
+report.OpenNMS.Minion.RPC.Server.PING-SWEEP.ProcessingTime.columns=SweepLastProc, SweepMeanProc
+report.OpenNMS.Minion.RPC.Server.PING-SWEEP.ProcessingTime.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.PING-SWEEP.ProcessingTime.command=--title="Ping Sweep Processing Time" \
+ --vertical-label="Seconds per message" \
+ DEF:mqLast={rrd1}:SweepLastProc:AVERAGE \
+ DEF:mqMean={rrd2}:SweepMeanProc:AVERAGE \
+ CDEF:mqLastSec=mqLast,1000,/ \
+ AREA:mqLastSec#73d216:"Process via JMS" \
+ GPRINT:mqLastSec:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MAX:" Max \\: %8.2lf %s\\n"
+
+
+###########################################
+## OpenNMS.Minion.RPC.Server.Poller.Exchanges
+###########################################
+report.OpenNMS.Minion.RPC.Server.Poller.Exchanges.name=Poller Monitor Messages Received
+report.OpenNMS.Minion.RPC.Server.Poller.Exchanges.columns=PollComplete, PollFailed
+report.OpenNMS.Minion.RPC.Server.Poller.Exchanges.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.Poller.Exchanges.command=--title="Poller Monitor Messages Received" \
+ --vertical-label="Messages per second" \
+ DEF:complete={rrd1}:PollComplete:AVERAGE \
+ DEF:failed={rrd2}:PollFailed:AVERAGE \
+ AREA:failed#EF343B:"Failed Messages " \
+ GPRINT:failed:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:failed:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:failed:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:complete#8DC63F:"Successful Messages" \
+ GPRINT:complete:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:complete:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:complete:MAX:" Max \\: %8.2lf %s\\n"
+
+###########################################
+## OpenNMS.Minion.RPC.Server.Poller.ProcessingTime
+###########################################
+report.OpenNMS.Minion.RPC.Server.Poller.ProcessingTime.name=Poller Monitor Processing Time
+report.OpenNMS.Minion.RPC.Server.Poller.ProcessingTime.columns=PollLastProc, PollMeanProc
+report.OpenNMS.Minion.RPC.Server.Poller.ProcessingTime.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.Poller.ProcessingTime.command=--title="Poller Monitor Processing Time" \
+ --vertical-label="Seconds per message" \
+ DEF:mqLast={rrd1}:PollLastProc:AVERAGE \
+ DEF:mqMean={rrd2}:PollMeanProc:AVERAGE \
+ CDEF:mqLastSec=mqLast,1000,/ \
+ AREA:mqLastSec#73d216:"Process via JMS" \
+ GPRINT:mqLastSec:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MAX:" Max \\: %8.2lf %s\\n"
+
+
+###########################################
+## OpenNMS.Minion.RPC.Server.SNMP.Exchanges
+###########################################
+report.OpenNMS.Minion.RPC.Server.SNMP.Exchanges.name=SNMP Messages Received
+report.OpenNMS.Minion.RPC.Server.SNMP.Exchanges.columns=SnmpComplete, SnmpFailed
+report.OpenNMS.Minion.RPC.Server.SNMP.Exchanges.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.SNMP.Exchanges.command=--title="SNMP Messages Received" \
+ --vertical-label="Messages per second" \
+ DEF:complete={rrd1}:SnmpComplete:AVERAGE \
+ DEF:failed={rrd2}:SnmpFailed:AVERAGE \
+ AREA:failed#EF343B:"Failed Messages " \
+ GPRINT:failed:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:failed:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:failed:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:complete#8DC63F:"Successful Messages" \
+ GPRINT:complete:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:complete:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:complete:MAX:" Max \\: %8.2lf %s\\n"
+
+###########################################
+## OpenNMS.Minion.RPC.Server.SNMP.ProcessingTime
+###########################################
+report.OpenNMS.Minion.RPC.Server.SNMP.ProcessingTime.name=SNMP Processing Time
+report.OpenNMS.Minion.RPC.Server.SNMP.ProcessingTime.columns=SnmpLastProc, SnmpMeanProc
+report.OpenNMS.Minion.RPC.Server.SNMP.ProcessingTime.type=interfaceSnmp
+report.OpenNMS.Minion.RPC.Server.SNMP.ProcessingTime.command=--title="SNMP Processing Time" \
+ --vertical-label="Seconds per message" \
+ DEF:mqLast={rrd1}:SnmpLastProc:AVERAGE \
+ DEF:mqMean={rrd2}:SnmpMeanProc:AVERAGE \
+ CDEF:mqLastSec=mqLast,1000,/ \
+ AREA:mqLastSec#73d216:"Process via JMS" \
+ GPRINT:mqLastSec:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:mqLastSec:MAX:" Max \\: %8.2lf %s\\n"
+
+
+###########################################
+## OpenNMS.Minion.Syslogd.Listener.Exchanges
+###########################################
+report.OpenNMS.Minion.Syslogd.Listener.Exchanges.name=Syslog Messages Received
+report.OpenNMS.Minion.Syslogd.Listener.Exchanges.columns=SlogListComplete, SlogListFailed
+report.OpenNMS.Minion.Syslogd.Listener.Exchanges.type=interfaceSnmp
+report.OpenNMS.Minion.Syslogd.Listener.Exchanges.command=--title="Syslog Messages Received" \
+ --vertical-label="Messages per second" \
+ DEF:complete={rrd1}:SlogListComplete:AVERAGE \
+ DEF:failed={rrd2}:SlogListFailed:AVERAGE \
+ AREA:failed#EF343B:"Failed Messages " \
+ GPRINT:failed:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:failed:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:failed:MAX:" Max \\: %8.2lf %s\\n" \
+ STACK:complete#8DC63F:"Successful Messages" \
+ GPRINT:complete:AVERAGE:" Avg \\: %8.2lf %s" \
+ GPRINT:complete:MIN:" Min \\: %8.2lf %s" \
+ GPRINT:complete:MAX:" Max \\: %8.2lf %s\\n"
+
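Every RPC service in the new Minion file follows the commented template at its top: an Exchanges graph that draws failed messages as a red AREA with successful messages STACKed on top, and a ProcessingTime graph that rescales the millisecond timer via mqLast,1000,/. Each ProcessingTime report also DEFs the mean-processing-time column as mqMean, though only the last-processing-time series is actually plotted.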
diff --git a/snmp-graph.properties.d/paloalto-graph.properties b/snmp-graph.properties.d/paloalto-graph.properties
index 9da2e5e..a95017c 100644
--- a/snmp-graph.properties.d/paloalto-graph.properties
+++ b/snmp-graph.properties.d/paloalto-graph.properties
@@ -32,7 +32,7 @@ report.paloalto.envt.command=--title="Environmental Status of {panentPhysicalNam
COMMENT:"\\n" \
GPRINT:currentSessions:AVERAGE:"Avg \\: %1.0lf" \
GPRINT:currentSessions:MIN:"Min \\: %1.0lf" \
- GPRINT:currentSessions:MAX:"Max \\: %1.0lf \\n"
+ GPRINT:currentSessions:MAX:"Max \\: %1.0lf \\n"
report.paloalto.session.util.name=Chassis Sessions (Palo Alto)
report.paloalto.session.util.columns=panSessionUtil
@@ -153,7 +153,7 @@ report.paloalto.lc.vdisk.command=--title="Log Collector Disk Usage" \
LINE2:vdisk4MB#f0000f:"Disk 4" \
GPRINT:vdisk4MB:AVERAGE:"Avg \\: %1.0lf" \
GPRINT:vdisk4MB:MIN:"Min \\: %1.0lf" \
- GPRINT:vdisk4MB:MAX:"Max \\: %1.0lf \\n"
+ GPRINT:vdisk4MB:MAX:"Max \\: %1.0lf \\n"
report.paloalto.lc.age.name=PaloAlto Log Collector Log Age
diff --git a/snmp-graph.properties.d/postgresql-graph.properties b/snmp-graph.properties.d/postgresql-graph.properties
index 2f328a3..6dde90e 100644
--- a/snmp-graph.properties.d/postgresql-graph.properties
+++ b/snmp-graph.properties.d/postgresql-graph.properties
@@ -60,7 +60,7 @@ report.pgsql.dbsize.command=--title="PostgreSQL DB Size - {datname}" \
AREA:size#7EE600:"DB Size" \
GPRINT:size:AVERAGE:" Avg\\: %8.2lf %s" \
GPRINT:size:MIN:"Min\\: %8.2lf %s" \
- GPRINT:size:MAX:"Max\\: %8.2lf %s\\n"
+ GPRINT:size:MAX:"Max\\: %8.2lf %s\\n"
report.pgsql.dbbackends.name=PostgreSQL DB Backends
report.pgsql.dbbackends.type=pgDatabase
@@ -76,7 +76,7 @@ report.pgsql.dbbackends.command=--title="PostgreSQL DB Backends - {datname}" \
AREA:size#7EE600:"Backends" \
GPRINT:size:AVERAGE:" Avg\\: %8.2lf %s" \
GPRINT:size:MIN:"Min\\: %8.2lf %s" \
- GPRINT:size:MAX:"Max\\: %8.2lf %s\\n"
+ GPRINT:size:MAX:"Max\\: %8.2lf %s\\n"
report.pgsql.tssize.name=PostgreSQL Tablespace Size
@@ -93,5 +93,5 @@ report.pgsql.tssize.command=--title="PostgreSQL Tablespace Size - {spcname}" \
AREA:size#7EE600:"Tablespace Size" \
GPRINT:size:AVERAGE:" Avg\\: %8.2lf %s" \
GPRINT:size:MIN:"Min\\: %8.2lf %s" \
- GPRINT:size:MAX:"Max\\: %8.2lf %s\\n"
+ GPRINT:size:MAX:"Max\\: %8.2lf %s\\n"
diff --git a/snmp-graph.properties.d/riverbed-steelhead-graph.properties b/snmp-graph.properties.d/riverbed-steelhead-graph.properties
index 0d3ee1b..cad0ca7 100644
--- a/snmp-graph.properties.d/riverbed-steelhead-graph.properties
+++ b/snmp-graph.properties.d/riverbed-steelhead-graph.properties
@@ -260,7 +260,7 @@ report.riverbed.steelhead.cpuStats.command=--title="Riverbed Steelhead CPU Stats
STACK:idle#a0ffa0:"Idle " \
GPRINT:idle:AVERAGE:"Avg \\: %8.2lf" \
GPRINT:idle:MIN:"Min \\: %8.2lf" \
- GPRINT:idle:MAX:"Max \\: %8.2lf" \
+ GPRINT:idle:MAX:"Max \\: %8.2lf"
report.riverbed.steelhead.portBandwidth.name=Riverbed Steelhead Port Bandwidth
report.riverbed.steelhead.portBandwidth.columns=rbshBwPortInLan,rbshBwPortInWan,rbshBwPortOutLan,rbshBwPortOutWan
diff --git a/snmp-graph.properties.d/sofaware-embeddedngx-graph.properties b/snmp-graph.properties.d/sofaware-embeddedngx-graph.properties
index bc6c42b..c6f78b9 100644
--- a/snmp-graph.properties.d/sofaware-embeddedngx-graph.properties
+++ b/snmp-graph.properties.d/sofaware-embeddedngx-graph.properties
@@ -78,7 +78,7 @@ report.sofaware.embeddedngx.storageFirm.command=--title="SofaWare Embedded NGX F
LINE2:firmTot#000000:"Total" \
GPRINT:firmTot:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:firmTot:MIN:"Min \\: %8.2lf %s" \
- GPRINT:firmTot:MAX:"Max \\: %8.2lf %s"
+ GPRINT:firmTot:MAX:"Max \\: %8.2lf %s"
report.sofaware.embeddedngx.storageCF.name=SofaWare Embedded NGX CF Storage
report.sofaware.embeddedngx.storageCF.columns=swStorageCFTot,swStorageCFFree
@@ -96,7 +96,7 @@ report.sofaware.embeddedngx.storageCF.command=--title="SofaWare Embedded NGX CF
LINE2:cfTot#000000:"Total" \
GPRINT:cfTot:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:cfTot:MIN:"Min \\: %8.2lf %s" \
- GPRINT:cfTot:MAX:"Max \\: %8.2lf %s"
+ GPRINT:cfTot:MAX:"Max \\: %8.2lf %s"
report.sofaware.embeddedngx.licenses.name=SofaWare Embedded NGX Licenses
report.sofaware.embeddedngx.licenses.columns=swLicenseUsedNodes
diff --git a/snmp-graph.properties.d/vmware-cim-graph-simple.properties b/snmp-graph.properties.d/vmware-cim-graph-simple.properties
index 6c0b8bd..1e1a4c5 100644
--- a/snmp-graph.properties.d/vmware-cim-graph-simple.properties
+++ b/snmp-graph.properties.d/vmware-cim-graph-simple.properties
@@ -25,7 +25,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorOther.name=SensorOther
report.vmwareCim.SensorOther.columns=CurrentReading
@@ -37,7 +37,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorTemperature.name=SensorTemperature
report.vmwareCim.SensorTemperature.columns=CurrentReading
@@ -49,7 +49,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorVoltage.name=SensorVoltage
report.vmwareCim.SensorVoltage.columns=CurrentReading
@@ -61,7 +61,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorCurrent.name=SensorCurrent
report.vmwareCim.SensorCurrent.columns=CurrentReading
@@ -73,7 +73,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorCounter.name=SensorCounter
report.vmwareCim.SensorCounter.columns=CurrentReading
@@ -87,7 +87,7 @@ AREA:xxx#729fcf \
LINE1:xxx#3465a4:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorTachometer.name=SensorTachometer
report.vmwareCim.SensorTachometer.columns=CurrentReading
@@ -99,7 +99,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorSwitch.name=SensorSwitch
report.vmwareCim.SensorSwitch.columns=CurrentReading
@@ -111,7 +111,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorLock.name=SensorLock
report.vmwareCim.SensorLock.columns=CurrentReading
@@ -123,7 +123,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorHumidity.name=SensorHumidity
report.vmwareCim.SensorHumidity.columns=CurrentReading
@@ -135,7 +135,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorSmokeDetection.name=SensorSmokeDetection
report.vmwareCim.SensorSmokeDetection.columns=CurrentReading
@@ -147,7 +147,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorPresence.name=SensorPresence
report.vmwareCim.SensorPresence.columns=CurrentReading
@@ -159,7 +159,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorAirFlow.name=SensorAirFlow
report.vmwareCim.SensorAirFlow.columns=CurrentReading
@@ -171,7 +171,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorPowerConsumption.name=SensorPowerConsumption
report.vmwareCim.SensorPowerConsumption.columns=CurrentReading
@@ -183,7 +183,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorPowerProduction.name=SensorPowerProduction
report.vmwareCim.SensorPowerProduction.columns=CurrentReading
@@ -195,7 +195,7 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmwareCim.SensorPressure.name=SensorPressure
report.vmwareCim.SensorPressure.columns=CurrentReading
@@ -207,5 +207,5 @@ DEF:xxx={rrd1}:CurrentReading:AVERAGE \
LINE2:xxx#0000ff:"CurrentReading" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
diff --git a/snmp-graph.properties.d/vmware3-graph-simple.properties b/snmp-graph.properties.d/vmware3-graph-simple.properties
index f99b3a8..d8565a8 100644
--- a/snmp-graph.properties.d/vmware3-graph-simple.properties
+++ b/snmp-graph.properties.d/vmware3-graph-simple.properties
@@ -136,7 +136,7 @@ DEF:xxx={rrd1}:CpuIdleSum:AVERAGE \
LINE2:xxx#0000ff:"CpuIdleSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuUsageNon.name=CpuUsageNon
report.vmware3.CpuUsageNon.columns=CpuUsageNon
@@ -148,7 +148,7 @@ DEF:xxx={rrd1}:CpuUsageNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuUsedSum.name=CpuUsedSum
report.vmware3.CpuUsedSum.columns=CpuUsedSum
@@ -160,7 +160,7 @@ DEF:xxx={rrd1}:CpuUsedSum:AVERAGE \
LINE2:xxx#0000ff:"CpuUsedSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuRdCyAvg.name=CpuRdCyAvg
report.vmware3.CpuRdCyAvg.columns=CpuRdCyAvg
@@ -171,7 +171,7 @@ DEF:xxx={rrd1}:CpuRdCyAvg:AVERAGE \
LINE2:xxx#0000ff:"CpuRdCyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuUsageNon.name=CpuUsageNon
report.vmware3.CpuUsageNon.columns=CpuUsageNon
@@ -182,7 +182,7 @@ DEF:xxx={rrd1}:CpuUsageNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuUsagemhzNon.name=CpuUsagemhzNon
report.vmware3.CpuUsagemhzNon.columns=CpuUsagemhzNon
@@ -193,7 +193,7 @@ DEF:xxx={rrd1}:CpuUsagemhzNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsagemhzNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskUsageNon.name=DiskUsageNon
report.vmware3.DiskUsageNon.columns=DiskUsageNon
@@ -204,7 +204,7 @@ DEF:xxx={rrd1}:DiskUsageNon:AVERAGE \
LINE2:xxx#0000ff:"DiskUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemActiveNon.name=MemActiveNon
report.vmware3.MemActiveNon.columns=MemActiveNon
@@ -215,7 +215,7 @@ DEF:xxx={rrd1}:MemActiveNon:AVERAGE \
LINE2:xxx#0000ff:"MemActiveNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemConsumedNon.name=MemConsumedNon
report.vmware3.MemConsumedNon.columns=MemConsumedNon
@@ -226,7 +226,7 @@ DEF:xxx={rrd1}:MemConsumedNon:AVERAGE \
LINE2:xxx#0000ff:"MemConsumedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemGrantedNon.name=MemGrantedNon
report.vmware3.MemGrantedNon.columns=MemGrantedNon
@@ -237,7 +237,7 @@ DEF:xxx={rrd1}:MemGrantedNon:AVERAGE \
LINE2:xxx#0000ff:"MemGrantedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemHeapNon.name=MemHeapNon
report.vmware3.MemHeapNon.columns=MemHeapNon
@@ -248,7 +248,7 @@ DEF:xxx={rrd1}:MemHeapNon:AVERAGE \
LINE2:xxx#0000ff:"MemHeapNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemHeapfreeNon.name=MemHeapfreeNon
report.vmware3.MemHeapfreeNon.columns=MemHeapfreeNon
@@ -259,7 +259,7 @@ DEF:xxx={rrd1}:MemHeapfreeNon:AVERAGE \
LINE2:xxx#0000ff:"MemHeapfreeNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemOdNon.name=MemOdNon
report.vmware3.MemOdNon.columns=MemOdNon
@@ -270,7 +270,7 @@ DEF:xxx={rrd1}:MemOdNon:AVERAGE \
LINE2:xxx#0000ff:"MemOdNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemRdCyAvg.name=MemRdCyAvg
report.vmware3.MemRdCyAvg.columns=MemRdCyAvg
@@ -281,7 +281,7 @@ DEF:xxx={rrd1}:MemRdCyAvg:AVERAGE \
LINE2:xxx#0000ff:"MemRdCyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSharedNon.name=MemSharedNon
report.vmware3.MemSharedNon.columns=MemSharedNon
@@ -292,7 +292,7 @@ DEF:xxx={rrd1}:MemSharedNon:AVERAGE \
LINE2:xxx#0000ff:"MemSharedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSharedcommonNon.name=MemSharedcommonNon
report.vmware3.MemSharedcommonNon.columns=MemSharedcommonNon
@@ -303,7 +303,7 @@ DEF:xxx={rrd1}:MemSharedcommonNon:AVERAGE \
LINE2:xxx#0000ff:"MemSharedcommonNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSpinNon.name=MemSpinNon
report.vmware3.MemSpinNon.columns=MemSpinNon
@@ -314,7 +314,7 @@ DEF:xxx={rrd1}:MemSpinNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpinNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSpoutNon.name=MemSpoutNon
report.vmware3.MemSpoutNon.columns=MemSpoutNon
@@ -325,7 +325,7 @@ DEF:xxx={rrd1}:MemSpoutNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpoutNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSpusedNon.name=MemSpusedNon
report.vmware3.MemSpusedNon.columns=MemSpusedNon
@@ -336,7 +336,7 @@ DEF:xxx={rrd1}:MemSpusedNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpusedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemStateLat.name=MemStateLat
report.vmware3.MemStateLat.columns=MemStateLat
@@ -347,7 +347,7 @@ DEF:xxx={rrd1}:MemStateLat:AVERAGE \
LINE2:xxx#0000ff:"MemStateLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSysUsageNon.name=MemSysUsageNon
report.vmware3.MemSysUsageNon.columns=MemSysUsageNon
@@ -358,7 +358,7 @@ DEF:xxx={rrd1}:MemSysUsageNon:AVERAGE \
LINE2:xxx#0000ff:"MemSysUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemUdNon.name=MemUdNon
report.vmware3.MemUdNon.columns=MemUdNon
@@ -369,7 +369,7 @@ DEF:xxx={rrd1}:MemUdNon:AVERAGE \
LINE2:xxx#0000ff:"MemUdNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemUsageNon.name=MemUsageNon
report.vmware3.MemUsageNon.columns=MemUsageNon
@@ -380,7 +380,7 @@ DEF:xxx={rrd1}:MemUsageNon:AVERAGE \
LINE2:xxx#0000ff:"MemUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemVmmemctlNon.name=MemVmmemctlNon
report.vmware3.MemVmmemctlNon.columns=MemVmmemctlNon
@@ -391,7 +391,7 @@ DEF:xxx={rrd1}:MemVmmemctlNon:AVERAGE \
LINE2:xxx#0000ff:"MemVmmemctlNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemZeroNon.name=MemZeroNon
report.vmware3.MemZeroNon.columns=MemZeroNon
@@ -402,7 +402,7 @@ DEF:xxx={rrd1}:MemZeroNon:AVERAGE \
LINE2:xxx#0000ff:"MemZeroNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.NetUsageNon.name=NetUsageNon
report.vmware3.NetUsageNon.columns=NetUsageNon
@@ -413,7 +413,7 @@ DEF:xxx={rrd1}:NetUsageNon:AVERAGE \
LINE2:xxx#0000ff:"NetUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActav15Lat.name=ResCpuActav15Lat
report.vmware3.ResCpuActav15Lat.columns=ResCpuActav15Lat
@@ -424,7 +424,7 @@ DEF:xxx={rrd1}:ResCpuActav15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActav1Lat.name=ResCpuActav1Lat
report.vmware3.ResCpuActav1Lat.columns=ResCpuActav1Lat
@@ -435,7 +435,7 @@ DEF:xxx={rrd1}:ResCpuActav1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActav5Lat.name=ResCpuActav5Lat
report.vmware3.ResCpuActav5Lat.columns=ResCpuActav5Lat
@@ -446,7 +446,7 @@ DEF:xxx={rrd1}:ResCpuActav5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActpk15Lat.name=ResCpuActpk15Lat
report.vmware3.ResCpuActpk15Lat.columns=ResCpuActpk15Lat
@@ -457,7 +457,7 @@ DEF:xxx={rrd1}:ResCpuActpk15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActpk1Lat.name=ResCpuActpk1Lat
report.vmware3.ResCpuActpk1Lat.columns=ResCpuActpk1Lat
@@ -468,7 +468,7 @@ DEF:xxx={rrd1}:ResCpuActpk1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActpk5Lat.name=ResCpuActpk5Lat
report.vmware3.ResCpuActpk5Lat.columns=ResCpuActpk5Lat
@@ -479,7 +479,7 @@ DEF:xxx={rrd1}:ResCpuActpk5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuMaxLd15Lat.name=ResCpuMaxLd15Lat
report.vmware3.ResCpuMaxLd15Lat.columns=ResCpuMaxLd15Lat
@@ -490,7 +490,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuMaxLd1Lat.name=ResCpuMaxLd1Lat
report.vmware3.ResCpuMaxLd1Lat.columns=ResCpuMaxLd1Lat
@@ -501,7 +501,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuMaxLd5Lat.name=ResCpuMaxLd5Lat
report.vmware3.ResCpuMaxLd5Lat.columns=ResCpuMaxLd5Lat
@@ -512,7 +512,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunav15Lat.name=ResCpuRunav15Lat
report.vmware3.ResCpuRunav15Lat.columns=ResCpuRunav15Lat
@@ -523,7 +523,7 @@ DEF:xxx={rrd1}:ResCpuRunav15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunav1Lat.name=ResCpuRunav1Lat
report.vmware3.ResCpuRunav1Lat.columns=ResCpuRunav1Lat
@@ -534,7 +534,7 @@ DEF:xxx={rrd1}:ResCpuRunav1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunav5Lat.name=ResCpuRunav5Lat
report.vmware3.ResCpuRunav5Lat.columns=ResCpuRunav5Lat
@@ -545,7 +545,7 @@ DEF:xxx={rrd1}:ResCpuRunav5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunpk15Lat.name=ResCpuRunpk15Lat
report.vmware3.ResCpuRunpk15Lat.columns=ResCpuRunpk15Lat
@@ -556,7 +556,7 @@ DEF:xxx={rrd1}:ResCpuRunpk15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunpk1Lat.name=ResCpuRunpk1Lat
report.vmware3.ResCpuRunpk1Lat.columns=ResCpuRunpk1Lat
@@ -567,7 +567,7 @@ DEF:xxx={rrd1}:ResCpuRunpk1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunpk5Lat.name=ResCpuRunpk5Lat
report.vmware3.ResCpuRunpk5Lat.columns=ResCpuRunpk5Lat
@@ -578,7 +578,7 @@ DEF:xxx={rrd1}:ResCpuRunpk5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuSeCtLat.name=ResCpuSeCtLat
report.vmware3.ResCpuSeCtLat.columns=ResCpuSeCtLat
@@ -589,7 +589,7 @@ DEF:xxx={rrd1}:ResCpuSeCtLat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuSeCtLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuSePeriodLat.name=ResCpuSePeriodLat
report.vmware3.ResCpuSePeriodLat.columns=ResCpuSePeriodLat
@@ -600,7 +600,7 @@ DEF:xxx={rrd1}:ResCpuSePeriodLat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuSePeriodLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.SysUptimeLat.name=SysUptimeLat
report.vmware3.SysUptimeLat.columns=SysUptimeLat
@@ -611,7 +611,7 @@ DEF:xxx={rrd1}:SysUptimeLat:AVERAGE \
LINE2:xxx#0000ff:"SysUptimeLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MgtAgtMemUsedAvg.name=MgtAgtMemUsedAvg
report.vmware3.MgtAgtMemUsedAvg.columns=MgtAgtMemUsedAvg
@@ -623,7 +623,7 @@ DEF:xxx={rrd1}:MgtAgtMemUsedAvg:AVERAGE \
LINE2:xxx#0000ff:"MgtAgtMemUsedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MgtAgtSpInAvg.name=MgtAgtSpInAvg
report.vmware3.MgtAgtSpInAvg.columns=MgtAgtSpInAvg
@@ -635,7 +635,7 @@ DEF:xxx={rrd1}:MgtAgtSpInAvg:AVERAGE \
LINE2:xxx#0000ff:"MgtAgtSpInAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MgtAgtSpOutAvg.name=MgtAgtSpOutAvg
report.vmware3.MgtAgtSpOutAvg.columns=MgtAgtSpOutAvg
@@ -647,7 +647,7 @@ DEF:xxx={rrd1}:MgtAgtSpOutAvg:AVERAGE \
LINE2:xxx#0000ff:"MgtAgtSpOutAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MgtAgtSpUsedAvg.name=MgtAgtSpUsedAvg
report.vmware3.MgtAgtSpUsedAvg.columns=MgtAgtSpUsedAvg
@@ -659,7 +659,7 @@ DEF:xxx={rrd1}:MgtAgtSpUsedAvg:AVERAGE \
LINE2:xxx#0000ff:"MgtAgtSpUsedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.NetPacketsRxSum.name=NetPacketsRxSum
report.vmware3.NetPacketsRxSum.columns=NetPacketsRxSum
@@ -671,7 +671,7 @@ DEF:xxx={rrd1}:NetPacketsRxSum:AVERAGE \
LINE2:xxx#0000ff:"NetPacketsRxSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.NetPacketsTxSum.name=NetPacketsTxSum
report.vmware3.NetPacketsTxSum.columns=NetPacketsTxSum
@@ -683,7 +683,7 @@ DEF:xxx={rrd1}:NetPacketsTxSum:AVERAGE \
LINE2:xxx#0000ff:"NetPacketsTxSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.NetReceivedAvg.name=NetReceivedAvg
report.vmware3.NetReceivedAvg.columns=NetReceivedAvg
@@ -695,7 +695,7 @@ DEF:xxx={rrd1}:NetReceivedAvg:AVERAGE \
LINE2:xxx#0000ff:"NetReceivedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.NetTransmittedAvg.name=NetTransmittedAvg
report.vmware3.NetTransmittedAvg.columns=NetTransmittedAvg
@@ -707,7 +707,7 @@ DEF:xxx={rrd1}:NetTransmittedAvg:AVERAGE \
LINE2:xxx#0000ff:"NetTransmittedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskBusResetsSum.name=DiskBusResetsSum
report.vmware3.DiskBusResetsSum.columns=DiskBusResetsSum
@@ -719,7 +719,7 @@ DEF:xxx={rrd1}:DiskBusResetsSum:AVERAGE \
LINE2:xxx#0000ff:"DiskBusResetsSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskCsAdSum.name=DiskCsAdSum
report.vmware3.DiskCsAdSum.columns=DiskCsAdSum
@@ -731,7 +731,7 @@ DEF:xxx={rrd1}:DiskCsAdSum:AVERAGE \
LINE2:xxx#0000ff:"DiskCsAdSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskCsSum.name=DiskCsSum
report.vmware3.DiskCsSum.columns=DiskCsSum
@@ -743,7 +743,7 @@ DEF:xxx={rrd1}:DiskCsSum:AVERAGE \
LINE2:xxx#0000ff:"DiskCsSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskDeLyAvg.name=DiskDeLyAvg
report.vmware3.DiskDeLyAvg.columns=DiskDeLyAvg
@@ -755,7 +755,7 @@ DEF:xxx={rrd1}:DiskDeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskDeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskDeRdLyAvg.name=DiskDeRdLyAvg
report.vmware3.DiskDeRdLyAvg.columns=DiskDeRdLyAvg
@@ -767,7 +767,7 @@ DEF:xxx={rrd1}:DiskDeRdLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskDeRdLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskDeWeLyAvg.name=DiskDeWeLyAvg
report.vmware3.DiskDeWeLyAvg.columns=DiskDeWeLyAvg
@@ -779,7 +779,7 @@ DEF:xxx={rrd1}:DiskDeWeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskDeWeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskKlLyAvg.name=DiskKlLyAvg
report.vmware3.DiskKlLyAvg.columns=DiskKlLyAvg
@@ -791,7 +791,7 @@ DEF:xxx={rrd1}:DiskKlLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskKlLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskKlRdLyAvg.name=DiskKlRdLyAvg
report.vmware3.DiskKlRdLyAvg.columns=DiskKlRdLyAvg
@@ -803,7 +803,7 @@ DEF:xxx={rrd1}:DiskKlRdLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskKlRdLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskKlWeLyAvg.name=DiskKlWeLyAvg
report.vmware3.DiskKlWeLyAvg.columns=DiskKlWeLyAvg
@@ -815,7 +815,7 @@ DEF:xxx={rrd1}:DiskKlWeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskKlWeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskNrRdSum.name=DiskNrRdSum
report.vmware3.DiskNrRdSum.columns=DiskNrRdSum
@@ -827,7 +827,7 @@ DEF:xxx={rrd1}:DiskNrRdSum:AVERAGE \
LINE2:xxx#0000ff:"DiskNrRdSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskNrWeSum.name=DiskNrWeSum
report.vmware3.DiskNrWeSum.columns=DiskNrWeSum
@@ -839,7 +839,7 @@ DEF:xxx={rrd1}:DiskNrWeSum:AVERAGE \
LINE2:xxx#0000ff:"DiskNrWeSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskQeLyAvg.name=DiskQeLyAvg
report.vmware3.DiskQeLyAvg.columns=DiskQeLyAvg
@@ -851,7 +851,7 @@ DEF:xxx={rrd1}:DiskQeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskQeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskQeRdLyAvg.name=DiskQeRdLyAvg
report.vmware3.DiskQeRdLyAvg.columns=DiskQeRdLyAvg
@@ -863,7 +863,7 @@ DEF:xxx={rrd1}:DiskQeRdLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskQeRdLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskQeWeLyAvg.name=DiskQeWeLyAvg
report.vmware3.DiskQeWeLyAvg.columns=DiskQeWeLyAvg
@@ -875,7 +875,7 @@ DEF:xxx={rrd1}:DiskQeWeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskQeWeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskRdAvg.name=DiskRdAvg
report.vmware3.DiskRdAvg.columns=DiskRdAvg
@@ -887,7 +887,7 @@ DEF:xxx={rrd1}:DiskRdAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskRdAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskTlLyAvg.name=DiskTlLyAvg
report.vmware3.DiskTlLyAvg.columns=DiskTlLyAvg
@@ -899,7 +899,7 @@ DEF:xxx={rrd1}:DiskTlLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskTlLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskTlRdLyAvg.name=DiskTlRdLyAvg
report.vmware3.DiskTlRdLyAvg.columns=DiskTlRdLyAvg
@@ -911,7 +911,7 @@ DEF:xxx={rrd1}:DiskTlRdLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskTlRdLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskTlWeLyAvg.name=DiskTlWeLyAvg
report.vmware3.DiskTlWeLyAvg.columns=DiskTlWeLyAvg
@@ -923,7 +923,7 @@ DEF:xxx={rrd1}:DiskTlWeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskTlWeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskWeAvg.name=DiskWeAvg
report.vmware3.DiskWeAvg.columns=DiskWeAvg
@@ -935,7 +935,7 @@ DEF:xxx={rrd1}:DiskWeAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskWeAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.SysReCpuUsageNon.name=SysReCpuUsageNon
report.vmware3.SysReCpuUsageNon.columns=SysReCpuUsageNon
@@ -947,7 +947,7 @@ DEF:xxx={rrd1}:SysReCpuUsageNon:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuExtraSum.name=CpuExtraSum
report.vmware3.CpuExtraSum.columns=CpuExtraSum
@@ -959,7 +959,7 @@ DEF:xxx={rrd1}:CpuExtraSum:AVERAGE \
LINE2:xxx#0000ff:"CpuExtraSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuGuaranteedLat.name=CpuGuaranteedLat
report.vmware3.CpuGuaranteedLat.columns=CpuGuaranteedLat
@@ -971,7 +971,7 @@ DEF:xxx={rrd1}:CpuGuaranteedLat:AVERAGE \
LINE2:xxx#0000ff:"CpuGuaranteedLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuRdySum.name=CpuRdySum
report.vmware3.CpuRdySum.columns=CpuRdySum
@@ -983,7 +983,7 @@ DEF:xxx={rrd1}:CpuRdySum:AVERAGE \
LINE2:xxx#0000ff:"CpuRdySum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuSystemSum.name=CpuSystemSum
report.vmware3.CpuSystemSum.columns=CpuSystemSum
@@ -995,7 +995,7 @@ DEF:xxx={rrd1}:CpuSystemSum:AVERAGE \
LINE2:xxx#0000ff:"CpuSystemSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuUsagemhzNon.name=CpuUsagemhzNon
report.vmware3.CpuUsagemhzNon.columns=CpuUsagemhzNon
@@ -1007,7 +1007,7 @@ DEF:xxx={rrd1}:CpuUsagemhzNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsagemhzNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuUsedSum.name=CpuUsedSum
report.vmware3.CpuUsedSum.columns=CpuUsedSum
@@ -1019,7 +1019,7 @@ DEF:xxx={rrd1}:CpuUsedSum:AVERAGE \
LINE2:xxx#0000ff:"CpuUsedSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuWaitSum.name=CpuWaitSum
report.vmware3.CpuWaitSum.columns=CpuWaitSum
@@ -1031,7 +1031,7 @@ DEF:xxx={rrd1}:CpuWaitSum:AVERAGE \
LINE2:xxx#0000ff:"CpuWaitSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuUsageNon.name=CpuUsageNon
report.vmware3.CpuUsageNon.columns=CpuUsageNon
@@ -1042,7 +1042,7 @@ DEF:xxx={rrd1}:CpuUsageNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.CpuUsagemhzNon.name=CpuUsagemhzNon
report.vmware3.CpuUsagemhzNon.columns=CpuUsagemhzNon
@@ -1053,7 +1053,7 @@ DEF:xxx={rrd1}:CpuUsagemhzNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsagemhzNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskUsageNon.name=DiskUsageNon
report.vmware3.DiskUsageNon.columns=DiskUsageNon
@@ -1064,7 +1064,7 @@ DEF:xxx={rrd1}:DiskUsageNon:AVERAGE \
LINE2:xxx#0000ff:"DiskUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemActiveNon.name=MemActiveNon
report.vmware3.MemActiveNon.columns=MemActiveNon
@@ -1075,7 +1075,7 @@ DEF:xxx={rrd1}:MemActiveNon:AVERAGE \
LINE2:xxx#0000ff:"MemActiveNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemConsumedNon.name=MemConsumedNon
report.vmware3.MemConsumedNon.columns=MemConsumedNon
@@ -1086,7 +1086,7 @@ DEF:xxx={rrd1}:MemConsumedNon:AVERAGE \
LINE2:xxx#0000ff:"MemConsumedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemGrantedNon.name=MemGrantedNon
report.vmware3.MemGrantedNon.columns=MemGrantedNon
@@ -1097,7 +1097,7 @@ DEF:xxx={rrd1}:MemGrantedNon:AVERAGE \
LINE2:xxx#0000ff:"MemGrantedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemOdNon.name=MemOdNon
report.vmware3.MemOdNon.columns=MemOdNon
@@ -1108,7 +1108,7 @@ DEF:xxx={rrd1}:MemOdNon:AVERAGE \
LINE2:xxx#0000ff:"MemOdNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSharedNon.name=MemSharedNon
report.vmware3.MemSharedNon.columns=MemSharedNon
@@ -1119,7 +1119,7 @@ DEF:xxx={rrd1}:MemSharedNon:AVERAGE \
LINE2:xxx#0000ff:"MemSharedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSpTtNon.name=MemSpTtNon
report.vmware3.MemSpTtNon.columns=MemSpTtNon
@@ -1130,7 +1130,7 @@ DEF:xxx={rrd1}:MemSpTtNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpTtNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSpinNon.name=MemSpinNon
report.vmware3.MemSpinNon.columns=MemSpinNon
@@ -1141,7 +1141,7 @@ DEF:xxx={rrd1}:MemSpinNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpinNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSpoutNon.name=MemSpoutNon
report.vmware3.MemSpoutNon.columns=MemSpoutNon
@@ -1152,7 +1152,7 @@ DEF:xxx={rrd1}:MemSpoutNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpoutNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemSppedNon.name=MemSppedNon
report.vmware3.MemSppedNon.columns=MemSppedNon
@@ -1163,7 +1163,7 @@ DEF:xxx={rrd1}:MemSppedNon:AVERAGE \
LINE2:xxx#0000ff:"MemSppedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemUsageNon.name=MemUsageNon
report.vmware3.MemUsageNon.columns=MemUsageNon
@@ -1174,7 +1174,7 @@ DEF:xxx={rrd1}:MemUsageNon:AVERAGE \
LINE2:xxx#0000ff:"MemUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemVmmemctlNon.name=MemVmmemctlNon
report.vmware3.MemVmmemctlNon.columns=MemVmmemctlNon
@@ -1185,7 +1185,7 @@ DEF:xxx={rrd1}:MemVmmemctlNon:AVERAGE \
LINE2:xxx#0000ff:"MemVmmemctlNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemVmmemctlTtNon.name=MemVmmemctlTtNon
report.vmware3.MemVmmemctlTtNon.columns=MemVmmemctlTtNon
@@ -1196,7 +1196,7 @@ DEF:xxx={rrd1}:MemVmmemctlTtNon:AVERAGE \
LINE2:xxx#0000ff:"MemVmmemctlTtNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.MemZeroNon.name=MemZeroNon
report.vmware3.MemZeroNon.columns=MemZeroNon
@@ -1207,7 +1207,7 @@ DEF:xxx={rrd1}:MemZeroNon:AVERAGE \
LINE2:xxx#0000ff:"MemZeroNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.NetUsageNon.name=NetUsageNon
report.vmware3.NetUsageNon.columns=NetUsageNon
@@ -1218,7 +1218,7 @@ DEF:xxx={rrd1}:NetUsageNon:AVERAGE \
LINE2:xxx#0000ff:"NetUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActav15Lat.name=ResCpuActav15Lat
report.vmware3.ResCpuActav15Lat.columns=ResCpuActav15Lat
@@ -1229,7 +1229,7 @@ DEF:xxx={rrd1}:ResCpuActav15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActav1Lat.name=ResCpuActav1Lat
report.vmware3.ResCpuActav1Lat.columns=ResCpuActav1Lat
@@ -1240,7 +1240,7 @@ DEF:xxx={rrd1}:ResCpuActav1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActav5Lat.name=ResCpuActav5Lat
report.vmware3.ResCpuActav5Lat.columns=ResCpuActav5Lat
@@ -1251,7 +1251,7 @@ DEF:xxx={rrd1}:ResCpuActav5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActpk15Lat.name=ResCpuActpk15Lat
report.vmware3.ResCpuActpk15Lat.columns=ResCpuActpk15Lat
@@ -1262,7 +1262,7 @@ DEF:xxx={rrd1}:ResCpuActpk15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActpk1Lat.name=ResCpuActpk1Lat
report.vmware3.ResCpuActpk1Lat.columns=ResCpuActpk1Lat
@@ -1273,7 +1273,7 @@ DEF:xxx={rrd1}:ResCpuActpk1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuActpk5Lat.name=ResCpuActpk5Lat
report.vmware3.ResCpuActpk5Lat.columns=ResCpuActpk5Lat
@@ -1284,7 +1284,7 @@ DEF:xxx={rrd1}:ResCpuActpk5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuMaxLd15Lat.name=ResCpuMaxLd15Lat
report.vmware3.ResCpuMaxLd15Lat.columns=ResCpuMaxLd15Lat
@@ -1295,7 +1295,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuMaxLd1Lat.name=ResCpuMaxLd1Lat
report.vmware3.ResCpuMaxLd1Lat.columns=ResCpuMaxLd1Lat
@@ -1306,7 +1306,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuMaxLd5Lat.name=ResCpuMaxLd5Lat
report.vmware3.ResCpuMaxLd5Lat.columns=ResCpuMaxLd5Lat
@@ -1317,7 +1317,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunav15Lat.name=ResCpuRunav15Lat
report.vmware3.ResCpuRunav15Lat.columns=ResCpuRunav15Lat
@@ -1328,7 +1328,7 @@ DEF:xxx={rrd1}:ResCpuRunav15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunav1Lat.name=ResCpuRunav1Lat
report.vmware3.ResCpuRunav1Lat.columns=ResCpuRunav1Lat
@@ -1339,7 +1339,7 @@ DEF:xxx={rrd1}:ResCpuRunav1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunav5Lat.name=ResCpuRunav5Lat
report.vmware3.ResCpuRunav5Lat.columns=ResCpuRunav5Lat
@@ -1350,7 +1350,7 @@ DEF:xxx={rrd1}:ResCpuRunav5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunpk15Lat.name=ResCpuRunpk15Lat
report.vmware3.ResCpuRunpk15Lat.columns=ResCpuRunpk15Lat
@@ -1361,7 +1361,7 @@ DEF:xxx={rrd1}:ResCpuRunpk15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunpk1Lat.name=ResCpuRunpk1Lat
report.vmware3.ResCpuRunpk1Lat.columns=ResCpuRunpk1Lat
@@ -1372,7 +1372,7 @@ DEF:xxx={rrd1}:ResCpuRunpk1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuRunpk5Lat.name=ResCpuRunpk5Lat
report.vmware3.ResCpuRunpk5Lat.columns=ResCpuRunpk5Lat
@@ -1383,7 +1383,7 @@ DEF:xxx={rrd1}:ResCpuRunpk5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuSeCtLat.name=ResCpuSeCtLat
report.vmware3.ResCpuSeCtLat.columns=ResCpuSeCtLat
@@ -1394,7 +1394,7 @@ DEF:xxx={rrd1}:ResCpuSeCtLat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuSeCtLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.ResCpuSePeriodLat.name=ResCpuSePeriodLat
report.vmware3.ResCpuSePeriodLat.columns=ResCpuSePeriodLat
@@ -1405,7 +1405,7 @@ DEF:xxx={rrd1}:ResCpuSePeriodLat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuSePeriodLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.SysHeartbeatSum.name=SysHeartbeatSum
report.vmware3.SysHeartbeatSum.columns=SysHeartbeatSum
@@ -1416,7 +1416,7 @@ DEF:xxx={rrd1}:SysHeartbeatSum:AVERAGE \
LINE2:xxx#0000ff:"SysHeartbeatSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.SysUptimeLat.name=SysUptimeLat
report.vmware3.SysUptimeLat.columns=SysUptimeLat
@@ -1427,7 +1427,7 @@ DEF:xxx={rrd1}:SysUptimeLat:AVERAGE \
LINE2:xxx#0000ff:"SysUptimeLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.NetPacketsRxSum.name=NetPacketsRxSum
report.vmware3.NetPacketsRxSum.columns=NetPacketsRxSum
@@ -1439,7 +1439,7 @@ DEF:xxx={rrd1}:NetPacketsRxSum:AVERAGE \
LINE2:xxx#0000ff:"NetPacketsRxSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.NetPacketsTxSum.name=NetPacketsTxSum
report.vmware3.NetPacketsTxSum.columns=NetPacketsTxSum
@@ -1451,7 +1451,7 @@ DEF:xxx={rrd1}:NetPacketsTxSum:AVERAGE \
LINE2:xxx#0000ff:"NetPacketsTxSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.NetReceivedAvg.name=NetReceivedAvg
report.vmware3.NetReceivedAvg.columns=NetReceivedAvg
@@ -1463,7 +1463,7 @@ DEF:xxx={rrd1}:NetReceivedAvg:AVERAGE \
LINE2:xxx#0000ff:"NetReceivedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.NetTransmittedAvg.name=NetTransmittedAvg
report.vmware3.NetTransmittedAvg.columns=NetTransmittedAvg
@@ -1475,7 +1475,7 @@ DEF:xxx={rrd1}:NetTransmittedAvg:AVERAGE \
LINE2:xxx#0000ff:"NetTransmittedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskBusResetsSum.name=DiskBusResetsSum
report.vmware3.DiskBusResetsSum.columns=DiskBusResetsSum
@@ -1487,7 +1487,7 @@ DEF:xxx={rrd1}:DiskBusResetsSum:AVERAGE \
LINE2:xxx#0000ff:"DiskBusResetsSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskCsAdSum.name=DiskCsAdSum
report.vmware3.DiskCsAdSum.columns=DiskCsAdSum
@@ -1499,7 +1499,7 @@ DEF:xxx={rrd1}:DiskCsAdSum:AVERAGE \
LINE2:xxx#0000ff:"DiskCsAdSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskCsSum.name=DiskCsSum
report.vmware3.DiskCsSum.columns=DiskCsSum
@@ -1511,7 +1511,7 @@ DEF:xxx={rrd1}:DiskCsSum:AVERAGE \
LINE2:xxx#0000ff:"DiskCsSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskNrRdSum.name=DiskNrRdSum
report.vmware3.DiskNrRdSum.columns=DiskNrRdSum
@@ -1523,7 +1523,7 @@ DEF:xxx={rrd1}:DiskNrRdSum:AVERAGE \
LINE2:xxx#0000ff:"DiskNrRdSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskNrWeSum.name=DiskNrWeSum
report.vmware3.DiskNrWeSum.columns=DiskNrWeSum
@@ -1535,7 +1535,7 @@ DEF:xxx={rrd1}:DiskNrWeSum:AVERAGE \
LINE2:xxx#0000ff:"DiskNrWeSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskRdAvg.name=DiskRdAvg
report.vmware3.DiskRdAvg.columns=DiskRdAvg
@@ -1547,7 +1547,7 @@ DEF:xxx={rrd1}:DiskRdAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskRdAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware3.DiskWeAvg.name=DiskWeAvg
report.vmware3.DiskWeAvg.columns=DiskWeAvg
@@ -1559,5 +1559,5 @@ DEF:xxx={rrd1}:DiskWeAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskWeAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
diff --git a/snmp-graph.properties.d/vmware4-graph-simple.properties b/snmp-graph.properties.d/vmware4-graph-simple.properties
index ffdd6f8..3211f0e 100644
--- a/snmp-graph.properties.d/vmware4-graph-simple.properties
+++ b/snmp-graph.properties.d/vmware4-graph-simple.properties
@@ -165,7 +165,7 @@ DEF:xxx={rrd1}:CpuIdleSum:AVERAGE \
LINE2:xxx#0000ff:"CpuIdleSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuUsageNon.name=CpuUsageNon
report.vmware4.CpuUsageNon.columns=CpuUsageNon
@@ -177,7 +177,7 @@ DEF:xxx={rrd1}:CpuUsageNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuUsedSum.name=CpuUsedSum
report.vmware4.CpuUsedSum.columns=CpuUsedSum
@@ -189,7 +189,7 @@ DEF:xxx={rrd1}:CpuUsedSum:AVERAGE \
LINE2:xxx#0000ff:"CpuUsedSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuRdCyAvg.name=CpuRdCyAvg
report.vmware4.CpuRdCyAvg.columns=CpuRdCyAvg
@@ -200,7 +200,7 @@ DEF:xxx={rrd1}:CpuRdCyAvg:AVERAGE \
LINE2:xxx#0000ff:"CpuRdCyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuUsagemhzNon.name=CpuUsagemhzNon
report.vmware4.CpuUsagemhzNon.columns=CpuUsagemhzNon
@@ -211,7 +211,7 @@ DEF:xxx={rrd1}:CpuUsagemhzNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsagemhzNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskMaxTlLyLat.name=DiskMaxTlLyLat
report.vmware4.DiskMaxTlLyLat.columns=DiskMaxTlLyLat
@@ -222,7 +222,7 @@ DEF:xxx={rrd1}:DiskMaxTlLyLat:AVERAGE \
LINE2:xxx#0000ff:"DiskMaxTlLyLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskUsageNon.name=DiskUsageNon
report.vmware4.DiskUsageNon.columns=DiskUsageNon
@@ -233,7 +233,7 @@ DEF:xxx={rrd1}:DiskUsageNon:AVERAGE \
LINE2:xxx#0000ff:"DiskUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemActiveNon.name=MemActiveNon
report.vmware4.MemActiveNon.columns=MemActiveNon
@@ -244,7 +244,7 @@ DEF:xxx={rrd1}:MemActiveNon:AVERAGE \
LINE2:xxx#0000ff:"MemActiveNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemConsumedNon.name=MemConsumedNon
report.vmware4.MemConsumedNon.columns=MemConsumedNon
@@ -255,7 +255,7 @@ DEF:xxx={rrd1}:MemConsumedNon:AVERAGE \
LINE2:xxx#0000ff:"MemConsumedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemGrantedNon.name=MemGrantedNon
report.vmware4.MemGrantedNon.columns=MemGrantedNon
@@ -266,7 +266,7 @@ DEF:xxx={rrd1}:MemGrantedNon:AVERAGE \
LINE2:xxx#0000ff:"MemGrantedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemHeapNon.name=MemHeapNon
report.vmware4.MemHeapNon.columns=MemHeapNon
@@ -277,7 +277,7 @@ DEF:xxx={rrd1}:MemHeapNon:AVERAGE \
LINE2:xxx#0000ff:"MemHeapNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemHeapfreeNon.name=MemHeapfreeNon
report.vmware4.MemHeapfreeNon.columns=MemHeapfreeNon
@@ -288,7 +288,7 @@ DEF:xxx={rrd1}:MemHeapfreeNon:AVERAGE \
LINE2:xxx#0000ff:"MemHeapfreeNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemOdNon.name=MemOdNon
report.vmware4.MemOdNon.columns=MemOdNon
@@ -299,7 +299,7 @@ DEF:xxx={rrd1}:MemOdNon:AVERAGE \
LINE2:xxx#0000ff:"MemOdNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemRdCyAvg.name=MemRdCyAvg
report.vmware4.MemRdCyAvg.columns=MemRdCyAvg
@@ -310,7 +310,7 @@ DEF:xxx={rrd1}:MemRdCyAvg:AVERAGE \
LINE2:xxx#0000ff:"MemRdCyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSharedNon.name=MemSharedNon
report.vmware4.MemSharedNon.columns=MemSharedNon
@@ -321,7 +321,7 @@ DEF:xxx={rrd1}:MemSharedNon:AVERAGE \
LINE2:xxx#0000ff:"MemSharedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSharedcommonNon.name=MemSharedcommonNon
report.vmware4.MemSharedcommonNon.columns=MemSharedcommonNon
@@ -332,7 +332,7 @@ DEF:xxx={rrd1}:MemSharedcommonNon:AVERAGE \
LINE2:xxx#0000ff:"MemSharedcommonNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSpinNon.name=MemSpinNon
report.vmware4.MemSpinNon.columns=MemSpinNon
@@ -343,7 +343,7 @@ DEF:xxx={rrd1}:MemSpinNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpinNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSpinReAvg.name=MemSpinReAvg
report.vmware4.MemSpinReAvg.columns=MemSpinReAvg
@@ -354,7 +354,7 @@ DEF:xxx={rrd1}:MemSpinReAvg:AVERAGE \
LINE2:xxx#0000ff:"MemSpinReAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSpoutNon.name=MemSpoutNon
report.vmware4.MemSpoutNon.columns=MemSpoutNon
@@ -365,7 +365,7 @@ DEF:xxx={rrd1}:MemSpoutNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpoutNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSpoutReAvg.name=MemSpoutReAvg
report.vmware4.MemSpoutReAvg.columns=MemSpoutReAvg
@@ -376,7 +376,7 @@ DEF:xxx={rrd1}:MemSpoutReAvg:AVERAGE \
LINE2:xxx#0000ff:"MemSpoutReAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSpusedNon.name=MemSpusedNon
report.vmware4.MemSpusedNon.columns=MemSpusedNon
@@ -387,7 +387,7 @@ DEF:xxx={rrd1}:MemSpusedNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpusedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemStateLat.name=MemStateLat
report.vmware4.MemStateLat.columns=MemStateLat
@@ -398,7 +398,7 @@ DEF:xxx={rrd1}:MemStateLat:AVERAGE \
LINE2:xxx#0000ff:"MemStateLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSysUsageNon.name=MemSysUsageNon
report.vmware4.MemSysUsageNon.columns=MemSysUsageNon
@@ -409,7 +409,7 @@ DEF:xxx={rrd1}:MemSysUsageNon:AVERAGE \
LINE2:xxx#0000ff:"MemSysUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemUdNon.name=MemUdNon
report.vmware4.MemUdNon.columns=MemUdNon
@@ -420,7 +420,7 @@ DEF:xxx={rrd1}:MemUdNon:AVERAGE \
LINE2:xxx#0000ff:"MemUdNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemUsageNon.name=MemUsageNon
report.vmware4.MemUsageNon.columns=MemUsageNon
@@ -431,7 +431,7 @@ DEF:xxx={rrd1}:MemUsageNon:AVERAGE \
LINE2:xxx#0000ff:"MemUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemVmmemctlNon.name=MemVmmemctlNon
report.vmware4.MemVmmemctlNon.columns=MemVmmemctlNon
@@ -442,7 +442,7 @@ DEF:xxx={rrd1}:MemVmmemctlNon:AVERAGE \
LINE2:xxx#0000ff:"MemVmmemctlNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemZeroNon.name=MemZeroNon
report.vmware4.MemZeroNon.columns=MemZeroNon
@@ -453,7 +453,7 @@ DEF:xxx={rrd1}:MemZeroNon:AVERAGE \
LINE2:xxx#0000ff:"MemZeroNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetTransmittedAvg.name=NetTransmittedAvg
report.vmware4.NetTransmittedAvg.columns=NetTransmittedAvg
@@ -464,7 +464,7 @@ DEF:xxx={rrd1}:NetTransmittedAvg:AVERAGE \
LINE2:xxx#0000ff:"NetTransmittedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetUsageNon.name=NetUsageNon
report.vmware4.NetUsageNon.columns=NetUsageNon
@@ -475,7 +475,7 @@ DEF:xxx={rrd1}:NetUsageNon:AVERAGE \
LINE2:xxx#0000ff:"NetUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActav15Lat.name=ResCpuActav15Lat
report.vmware4.ResCpuActav15Lat.columns=ResCpuActav15Lat
@@ -486,7 +486,7 @@ DEF:xxx={rrd1}:ResCpuActav15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActav1Lat.name=ResCpuActav1Lat
report.vmware4.ResCpuActav1Lat.columns=ResCpuActav1Lat
@@ -497,7 +497,7 @@ DEF:xxx={rrd1}:ResCpuActav1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActav5Lat.name=ResCpuActav5Lat
report.vmware4.ResCpuActav5Lat.columns=ResCpuActav5Lat
@@ -508,7 +508,7 @@ DEF:xxx={rrd1}:ResCpuActav5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActpk15Lat.name=ResCpuActpk15Lat
report.vmware4.ResCpuActpk15Lat.columns=ResCpuActpk15Lat
@@ -519,7 +519,7 @@ DEF:xxx={rrd1}:ResCpuActpk15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActpk1Lat.name=ResCpuActpk1Lat
report.vmware4.ResCpuActpk1Lat.columns=ResCpuActpk1Lat
@@ -530,7 +530,7 @@ DEF:xxx={rrd1}:ResCpuActpk1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActpk5Lat.name=ResCpuActpk5Lat
report.vmware4.ResCpuActpk5Lat.columns=ResCpuActpk5Lat
@@ -541,7 +541,7 @@ DEF:xxx={rrd1}:ResCpuActpk5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuMaxLd15Lat.name=ResCpuMaxLd15Lat
report.vmware4.ResCpuMaxLd15Lat.columns=ResCpuMaxLd15Lat
@@ -552,7 +552,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuMaxLd1Lat.name=ResCpuMaxLd1Lat
report.vmware4.ResCpuMaxLd1Lat.columns=ResCpuMaxLd1Lat
@@ -563,7 +563,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuMaxLd5Lat.name=ResCpuMaxLd5Lat
report.vmware4.ResCpuMaxLd5Lat.columns=ResCpuMaxLd5Lat
@@ -574,7 +574,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunav15Lat.name=ResCpuRunav15Lat
report.vmware4.ResCpuRunav15Lat.columns=ResCpuRunav15Lat
@@ -585,7 +585,7 @@ DEF:xxx={rrd1}:ResCpuRunav15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunav1Lat.name=ResCpuRunav1Lat
report.vmware4.ResCpuRunav1Lat.columns=ResCpuRunav1Lat
@@ -596,7 +596,7 @@ DEF:xxx={rrd1}:ResCpuRunav1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunav5Lat.name=ResCpuRunav5Lat
report.vmware4.ResCpuRunav5Lat.columns=ResCpuRunav5Lat
@@ -607,7 +607,7 @@ DEF:xxx={rrd1}:ResCpuRunav5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunpk15Lat.name=ResCpuRunpk15Lat
report.vmware4.ResCpuRunpk15Lat.columns=ResCpuRunpk15Lat
@@ -618,7 +618,7 @@ DEF:xxx={rrd1}:ResCpuRunpk15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunpk1Lat.name=ResCpuRunpk1Lat
report.vmware4.ResCpuRunpk1Lat.columns=ResCpuRunpk1Lat
@@ -629,7 +629,7 @@ DEF:xxx={rrd1}:ResCpuRunpk1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunpk5Lat.name=ResCpuRunpk5Lat
report.vmware4.ResCpuRunpk5Lat.columns=ResCpuRunpk5Lat
@@ -640,7 +640,7 @@ DEF:xxx={rrd1}:ResCpuRunpk5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuSeCtLat.name=ResCpuSeCtLat
report.vmware4.ResCpuSeCtLat.columns=ResCpuSeCtLat
@@ -651,7 +651,7 @@ DEF:xxx={rrd1}:ResCpuSeCtLat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuSeCtLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuSePeriodLat.name=ResCpuSePeriodLat
report.vmware4.ResCpuSePeriodLat.columns=ResCpuSePeriodLat
@@ -662,7 +662,7 @@ DEF:xxx={rrd1}:ResCpuSePeriodLat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuSePeriodLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysUptimeLat.name=SysUptimeLat
report.vmware4.SysUptimeLat.columns=SysUptimeLat
@@ -673,7 +673,7 @@ DEF:xxx={rrd1}:SysUptimeLat:AVERAGE \
LINE2:xxx#0000ff:"SysUptimeLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MgtAgtMemUsedAvg.name=MgtAgtMemUsedAvg
report.vmware4.MgtAgtMemUsedAvg.columns=MgtAgtMemUsedAvg
@@ -685,7 +685,7 @@ DEF:xxx={rrd1}:MgtAgtMemUsedAvg:AVERAGE \
LINE2:xxx#0000ff:"MgtAgtMemUsedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MgtAgtSpInAvg.name=MgtAgtSpInAvg
report.vmware4.MgtAgtSpInAvg.columns=MgtAgtSpInAvg
@@ -697,7 +697,7 @@ DEF:xxx={rrd1}:MgtAgtSpInAvg:AVERAGE \
LINE2:xxx#0000ff:"MgtAgtSpInAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MgtAgtSpOutAvg.name=MgtAgtSpOutAvg
report.vmware4.MgtAgtSpOutAvg.columns=MgtAgtSpOutAvg
@@ -709,7 +709,7 @@ DEF:xxx={rrd1}:MgtAgtSpOutAvg:AVERAGE \
LINE2:xxx#0000ff:"MgtAgtSpOutAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MgtAgtSpUsedAvg.name=MgtAgtSpUsedAvg
report.vmware4.MgtAgtSpUsedAvg.columns=MgtAgtSpUsedAvg
@@ -721,7 +721,7 @@ DEF:xxx={rrd1}:MgtAgtSpUsedAvg:AVERAGE \
LINE2:xxx#0000ff:"MgtAgtSpUsedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetDroppedRxSum.name=NetDroppedRxSum
report.vmware4.NetDroppedRxSum.columns=NetDroppedRxSum
@@ -733,7 +733,7 @@ DEF:xxx={rrd1}:NetDroppedRxSum:AVERAGE \
LINE2:xxx#0000ff:"NetDroppedRxSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetDroppedTxSum.name=NetDroppedTxSum
report.vmware4.NetDroppedTxSum.columns=NetDroppedTxSum
@@ -745,7 +745,7 @@ DEF:xxx={rrd1}:NetDroppedTxSum:AVERAGE \
LINE2:xxx#0000ff:"NetDroppedTxSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetPacketsRxSum.name=NetPacketsRxSum
report.vmware4.NetPacketsRxSum.columns=NetPacketsRxSum
@@ -757,7 +757,7 @@ DEF:xxx={rrd1}:NetPacketsRxSum:AVERAGE \
LINE2:xxx#0000ff:"NetPacketsRxSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetPacketsTxSum.name=NetPacketsTxSum
report.vmware4.NetPacketsTxSum.columns=NetPacketsTxSum
@@ -769,7 +769,7 @@ DEF:xxx={rrd1}:NetPacketsTxSum:AVERAGE \
LINE2:xxx#0000ff:"NetPacketsTxSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetReceivedAvg.name=NetReceivedAvg
report.vmware4.NetReceivedAvg.columns=NetReceivedAvg
@@ -781,7 +781,7 @@ DEF:xxx={rrd1}:NetReceivedAvg:AVERAGE \
LINE2:xxx#0000ff:"NetReceivedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetTransmittedAvg.name=NetTransmittedAvg
report.vmware4.NetTransmittedAvg.columns=NetTransmittedAvg
@@ -793,7 +793,7 @@ DEF:xxx={rrd1}:NetTransmittedAvg:AVERAGE \
LINE2:xxx#0000ff:"NetTransmittedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskBusResetsSum.name=DiskBusResetsSum
report.vmware4.DiskBusResetsSum.columns=DiskBusResetsSum
@@ -805,7 +805,7 @@ DEF:xxx={rrd1}:DiskBusResetsSum:AVERAGE \
LINE2:xxx#0000ff:"DiskBusResetsSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskCsAdSum.name=DiskCsAdSum
report.vmware4.DiskCsAdSum.columns=DiskCsAdSum
@@ -817,7 +817,7 @@ DEF:xxx={rrd1}:DiskCsAdSum:AVERAGE \
LINE2:xxx#0000ff:"DiskCsAdSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskCsSum.name=DiskCsSum
report.vmware4.DiskCsSum.columns=DiskCsSum
@@ -829,7 +829,7 @@ DEF:xxx={rrd1}:DiskCsSum:AVERAGE \
LINE2:xxx#0000ff:"DiskCsSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskDeLyAvg.name=DiskDeLyAvg
report.vmware4.DiskDeLyAvg.columns=DiskDeLyAvg
@@ -841,7 +841,7 @@ DEF:xxx={rrd1}:DiskDeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskDeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskDeRdLyAvg.name=DiskDeRdLyAvg
report.vmware4.DiskDeRdLyAvg.columns=DiskDeRdLyAvg
@@ -853,7 +853,7 @@ DEF:xxx={rrd1}:DiskDeRdLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskDeRdLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskDeWeLyAvg.name=DiskDeWeLyAvg
report.vmware4.DiskDeWeLyAvg.columns=DiskDeWeLyAvg
@@ -865,7 +865,7 @@ DEF:xxx={rrd1}:DiskDeWeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskDeWeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskKlLyAvg.name=DiskKlLyAvg
report.vmware4.DiskKlLyAvg.columns=DiskKlLyAvg
@@ -877,7 +877,7 @@ DEF:xxx={rrd1}:DiskKlLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskKlLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskKlRdLyAvg.name=DiskKlRdLyAvg
report.vmware4.DiskKlRdLyAvg.columns=DiskKlRdLyAvg
@@ -889,7 +889,7 @@ DEF:xxx={rrd1}:DiskKlRdLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskKlRdLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskKlWeLyAvg.name=DiskKlWeLyAvg
report.vmware4.DiskKlWeLyAvg.columns=DiskKlWeLyAvg
@@ -901,7 +901,7 @@ DEF:xxx={rrd1}:DiskKlWeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskKlWeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskNrRdSum.name=DiskNrRdSum
report.vmware4.DiskNrRdSum.columns=DiskNrRdSum
@@ -913,7 +913,7 @@ DEF:xxx={rrd1}:DiskNrRdSum:AVERAGE \
LINE2:xxx#0000ff:"DiskNrRdSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskNrWeSum.name=DiskNrWeSum
report.vmware4.DiskNrWeSum.columns=DiskNrWeSum
@@ -925,7 +925,7 @@ DEF:xxx={rrd1}:DiskNrWeSum:AVERAGE \
LINE2:xxx#0000ff:"DiskNrWeSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskQeLyAvg.name=DiskQeLyAvg
report.vmware4.DiskQeLyAvg.columns=DiskQeLyAvg
@@ -937,7 +937,7 @@ DEF:xxx={rrd1}:DiskQeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskQeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskQeRdLyAvg.name=DiskQeRdLyAvg
report.vmware4.DiskQeRdLyAvg.columns=DiskQeRdLyAvg
@@ -949,7 +949,7 @@ DEF:xxx={rrd1}:DiskQeRdLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskQeRdLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskQeWeLyAvg.name=DiskQeWeLyAvg
report.vmware4.DiskQeWeLyAvg.columns=DiskQeWeLyAvg
@@ -961,7 +961,7 @@ DEF:xxx={rrd1}:DiskQeWeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskQeWeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskRdAvg.name=DiskRdAvg
report.vmware4.DiskRdAvg.columns=DiskRdAvg
@@ -973,7 +973,7 @@ DEF:xxx={rrd1}:DiskRdAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskRdAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskTlLyAvg.name=DiskTlLyAvg
report.vmware4.DiskTlLyAvg.columns=DiskTlLyAvg
@@ -985,7 +985,7 @@ DEF:xxx={rrd1}:DiskTlLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskTlLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskTlRdLyAvg.name=DiskTlRdLyAvg
report.vmware4.DiskTlRdLyAvg.columns=DiskTlRdLyAvg
@@ -997,7 +997,7 @@ DEF:xxx={rrd1}:DiskTlRdLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskTlRdLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskTlWeLyAvg.name=DiskTlWeLyAvg
report.vmware4.DiskTlWeLyAvg.columns=DiskTlWeLyAvg
@@ -1009,7 +1009,7 @@ DEF:xxx={rrd1}:DiskTlWeLyAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskTlWeLyAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskWeAvg.name=DiskWeAvg
report.vmware4.DiskWeAvg.columns=DiskWeAvg
@@ -1021,7 +1021,7 @@ DEF:xxx={rrd1}:DiskWeAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskWeAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysDiskUsageLat.name=SysDiskUsageLat
report.vmware4.SysDiskUsageLat.columns=SysDiskUsageLat
@@ -1033,7 +1033,7 @@ DEF:xxx={rrd1}:SysDiskUsageLat:AVERAGE \
LINE2:xxx#0000ff:"SysDiskUsageLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReCpuAcMaxLat.name=SysReCpuAcMaxLat
report.vmware4.SysReCpuAcMaxLat.columns=SysReCpuAcMaxLat
@@ -1045,7 +1045,7 @@ DEF:xxx={rrd1}:SysReCpuAcMaxLat:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuAcMaxLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReCpuAcMinLat.name=SysReCpuAcMinLat
report.vmware4.SysReCpuAcMinLat.columns=SysReCpuAcMinLat
@@ -1057,7 +1057,7 @@ DEF:xxx={rrd1}:SysReCpuAcMinLat:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuAcMinLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReCpuAcSsLat.name=SysReCpuAcSsLat
report.vmware4.SysReCpuAcSsLat.columns=SysReCpuAcSsLat
@@ -1069,7 +1069,7 @@ DEF:xxx={rrd1}:SysReCpuAcSsLat:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuAcSsLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReCpuAct1Lat.name=SysReCpuAct1Lat
report.vmware4.SysReCpuAct1Lat.columns=SysReCpuAct1Lat
@@ -1081,7 +1081,7 @@ DEF:xxx={rrd1}:SysReCpuAct1Lat:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuAct1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReCpuAct5Lat.name=SysReCpuAct5Lat
report.vmware4.SysReCpuAct5Lat.columns=SysReCpuAct5Lat
@@ -1093,7 +1093,7 @@ DEF:xxx={rrd1}:SysReCpuAct5Lat:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuAct5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReCpuMaxLd1Lat.name=SysReCpuMaxLd1Lat
report.vmware4.SysReCpuMaxLd1Lat.columns=SysReCpuMaxLd1Lat
@@ -1105,7 +1105,7 @@ DEF:xxx={rrd1}:SysReCpuMaxLd1Lat:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuMaxLd1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReCpuMaxLd5Lat.name=SysReCpuMaxLd5Lat
report.vmware4.SysReCpuMaxLd5Lat.columns=SysReCpuMaxLd5Lat
@@ -1117,7 +1117,7 @@ DEF:xxx={rrd1}:SysReCpuMaxLd5Lat:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuMaxLd5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReCpuRun1Lat.name=SysReCpuRun1Lat
report.vmware4.SysReCpuRun1Lat.columns=SysReCpuRun1Lat
@@ -1129,7 +1129,7 @@ DEF:xxx={rrd1}:SysReCpuRun1Lat:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuRun1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReCpuRun5Lat.name=SysReCpuRun5Lat
report.vmware4.SysReCpuRun5Lat.columns=SysReCpuRun5Lat
@@ -1141,7 +1141,7 @@ DEF:xxx={rrd1}:SysReCpuRun5Lat:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuRun5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReCpuUsageNon.name=SysReCpuUsageNon
report.vmware4.SysReCpuUsageNon.columns=SysReCpuUsageNon
@@ -1153,7 +1153,7 @@ DEF:xxx={rrd1}:SysReCpuUsageNon:AVERAGE \
LINE2:xxx#0000ff:"SysReCpuUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReMemAcMaxLat.name=SysReMemAcMaxLat
report.vmware4.SysReMemAcMaxLat.columns=SysReMemAcMaxLat
@@ -1165,7 +1165,7 @@ DEF:xxx={rrd1}:SysReMemAcMaxLat:AVERAGE \
LINE2:xxx#0000ff:"SysReMemAcMaxLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReMemAcMinLat.name=SysReMemAcMinLat
report.vmware4.SysReMemAcMinLat.columns=SysReMemAcMinLat
@@ -1177,7 +1177,7 @@ DEF:xxx={rrd1}:SysReMemAcMinLat:AVERAGE \
LINE2:xxx#0000ff:"SysReMemAcMinLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReMemAcSsLat.name=SysReMemAcSsLat
report.vmware4.SysReMemAcSsLat.columns=SysReMemAcSsLat
@@ -1189,7 +1189,7 @@ DEF:xxx={rrd1}:SysReMemAcSsLat:AVERAGE \
LINE2:xxx#0000ff:"SysReMemAcSsLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReMemCowLat.name=SysReMemCowLat
report.vmware4.SysReMemCowLat.columns=SysReMemCowLat
@@ -1201,7 +1201,7 @@ DEF:xxx={rrd1}:SysReMemCowLat:AVERAGE \
LINE2:xxx#0000ff:"SysReMemCowLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReMemMappedLat.name=SysReMemMappedLat
report.vmware4.SysReMemMappedLat.columns=SysReMemMappedLat
@@ -1213,7 +1213,7 @@ DEF:xxx={rrd1}:SysReMemMappedLat:AVERAGE \
LINE2:xxx#0000ff:"SysReMemMappedLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReMemOdLat.name=SysReMemOdLat
report.vmware4.SysReMemOdLat.columns=SysReMemOdLat
@@ -1225,7 +1225,7 @@ DEF:xxx={rrd1}:SysReMemOdLat:AVERAGE \
LINE2:xxx#0000ff:"SysReMemOdLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReMemSharedLat.name=SysReMemSharedLat
report.vmware4.SysReMemSharedLat.columns=SysReMemSharedLat
@@ -1237,7 +1237,7 @@ DEF:xxx={rrd1}:SysReMemSharedLat:AVERAGE \
LINE2:xxx#0000ff:"SysReMemSharedLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReMemSppedLat.name=SysReMemSppedLat
report.vmware4.SysReMemSppedLat.columns=SysReMemSppedLat
@@ -1249,7 +1249,7 @@ DEF:xxx={rrd1}:SysReMemSppedLat:AVERAGE \
LINE2:xxx#0000ff:"SysReMemSppedLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReMemTdLat.name=SysReMemTdLat
report.vmware4.SysReMemTdLat.columns=SysReMemTdLat
@@ -1261,7 +1261,7 @@ DEF:xxx={rrd1}:SysReMemTdLat:AVERAGE \
LINE2:xxx#0000ff:"SysReMemTdLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysReMemZeroLat.name=SysReMemZeroLat
report.vmware4.SysReMemZeroLat.columns=SysReMemZeroLat
@@ -1273,7 +1273,7 @@ DEF:xxx={rrd1}:SysReMemZeroLat:AVERAGE \
LINE2:xxx#0000ff:"SysReMemZeroLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuRdySum.name=CpuRdySum
report.vmware4.CpuRdySum.columns=CpuRdySum
@@ -1285,7 +1285,7 @@ DEF:xxx={rrd1}:CpuRdySum:AVERAGE \
LINE2:xxx#0000ff:"CpuRdySum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuSpwaitSum.name=CpuSpwaitSum
report.vmware4.CpuSpwaitSum.columns=CpuSpwaitSum
@@ -1297,7 +1297,7 @@ DEF:xxx={rrd1}:CpuSpwaitSum:AVERAGE \
LINE2:xxx#0000ff:"CpuSpwaitSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuSystemSum.name=CpuSystemSum
report.vmware4.CpuSystemSum.columns=CpuSystemSum
@@ -1309,7 +1309,7 @@ DEF:xxx={rrd1}:CpuSystemSum:AVERAGE \
LINE2:xxx#0000ff:"CpuSystemSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuUsagemhzNon.name=CpuUsagemhzNon
report.vmware4.CpuUsagemhzNon.columns=CpuUsagemhzNon
@@ -1321,7 +1321,7 @@ DEF:xxx={rrd1}:CpuUsagemhzNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsagemhzNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuUsedSum.name=CpuUsedSum
report.vmware4.CpuUsedSum.columns=CpuUsedSum
@@ -1333,7 +1333,7 @@ DEF:xxx={rrd1}:CpuUsedSum:AVERAGE \
LINE2:xxx#0000ff:"CpuUsedSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuWaitSum.name=CpuWaitSum
report.vmware4.CpuWaitSum.columns=CpuWaitSum
@@ -1345,7 +1345,7 @@ DEF:xxx={rrd1}:CpuWaitSum:AVERAGE \
LINE2:xxx#0000ff:"CpuWaitSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuRdySum.name=CpuRdySum
report.vmware4.CpuRdySum.columns=CpuRdySum
@@ -1356,7 +1356,7 @@ DEF:xxx={rrd1}:CpuRdySum:AVERAGE \
LINE2:xxx#0000ff:"CpuRdySum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuUsageNon.name=CpuUsageNon
report.vmware4.CpuUsageNon.columns=CpuUsageNon
@@ -1367,7 +1367,7 @@ DEF:xxx={rrd1}:CpuUsageNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.CpuUsagemhzNon.name=CpuUsagemhzNon
report.vmware4.CpuUsagemhzNon.columns=CpuUsagemhzNon
@@ -1378,7 +1378,7 @@ DEF:xxx={rrd1}:CpuUsagemhzNon:AVERAGE \
LINE2:xxx#0000ff:"CpuUsagemhzNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskRdAvg.name=DiskRdAvg
report.vmware4.DiskRdAvg.columns=DiskRdAvg
@@ -1389,7 +1389,7 @@ DEF:xxx={rrd1}:DiskRdAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskRdAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskUsageNon.name=DiskUsageNon
report.vmware4.DiskUsageNon.columns=DiskUsageNon
@@ -1400,7 +1400,7 @@ DEF:xxx={rrd1}:DiskUsageNon:AVERAGE \
LINE2:xxx#0000ff:"DiskUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemActiveNon.name=MemActiveNon
report.vmware4.MemActiveNon.columns=MemActiveNon
@@ -1411,7 +1411,7 @@ DEF:xxx={rrd1}:MemActiveNon:AVERAGE \
LINE2:xxx#0000ff:"MemActiveNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemConsumedNon.name=MemConsumedNon
report.vmware4.MemConsumedNon.columns=MemConsumedNon
@@ -1422,7 +1422,7 @@ DEF:xxx={rrd1}:MemConsumedNon:AVERAGE \
LINE2:xxx#0000ff:"MemConsumedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemGrantedNon.name=MemGrantedNon
report.vmware4.MemGrantedNon.columns=MemGrantedNon
@@ -1433,7 +1433,7 @@ DEF:xxx={rrd1}:MemGrantedNon:AVERAGE \
LINE2:xxx#0000ff:"MemGrantedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemOdNon.name=MemOdNon
report.vmware4.MemOdNon.columns=MemOdNon
@@ -1444,7 +1444,7 @@ DEF:xxx={rrd1}:MemOdNon:AVERAGE \
LINE2:xxx#0000ff:"MemOdNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSharedNon.name=MemSharedNon
report.vmware4.MemSharedNon.columns=MemSharedNon
@@ -1455,7 +1455,7 @@ DEF:xxx={rrd1}:MemSharedNon:AVERAGE \
LINE2:xxx#0000ff:"MemSharedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSpTtNon.name=MemSpTtNon
report.vmware4.MemSpTtNon.columns=MemSpTtNon
@@ -1466,7 +1466,7 @@ DEF:xxx={rrd1}:MemSpTtNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpTtNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSpinNon.name=MemSpinNon
report.vmware4.MemSpinNon.columns=MemSpinNon
@@ -1477,7 +1477,7 @@ DEF:xxx={rrd1}:MemSpinNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpinNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSpinReAvg.name=MemSpinReAvg
report.vmware4.MemSpinReAvg.columns=MemSpinReAvg
@@ -1488,7 +1488,7 @@ DEF:xxx={rrd1}:MemSpinReAvg:AVERAGE \
LINE2:xxx#0000ff:"MemSpinReAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSpoutNon.name=MemSpoutNon
report.vmware4.MemSpoutNon.columns=MemSpoutNon
@@ -1499,7 +1499,7 @@ DEF:xxx={rrd1}:MemSpoutNon:AVERAGE \
LINE2:xxx#0000ff:"MemSpoutNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSpoutReAvg.name=MemSpoutReAvg
report.vmware4.MemSpoutReAvg.columns=MemSpoutReAvg
@@ -1510,7 +1510,7 @@ DEF:xxx={rrd1}:MemSpoutReAvg:AVERAGE \
LINE2:xxx#0000ff:"MemSpoutReAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemSppedNon.name=MemSppedNon
report.vmware4.MemSppedNon.columns=MemSppedNon
@@ -1521,7 +1521,7 @@ DEF:xxx={rrd1}:MemSppedNon:AVERAGE \
LINE2:xxx#0000ff:"MemSppedNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemUsageNon.name=MemUsageNon
report.vmware4.MemUsageNon.columns=MemUsageNon
@@ -1532,7 +1532,7 @@ DEF:xxx={rrd1}:MemUsageNon:AVERAGE \
LINE2:xxx#0000ff:"MemUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemVmmemctlNon.name=MemVmmemctlNon
report.vmware4.MemVmmemctlNon.columns=MemVmmemctlNon
@@ -1543,7 +1543,7 @@ DEF:xxx={rrd1}:MemVmmemctlNon:AVERAGE \
LINE2:xxx#0000ff:"MemVmmemctlNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemVmmemctlTtNon.name=MemVmmemctlTtNon
report.vmware4.MemVmmemctlTtNon.columns=MemVmmemctlTtNon
@@ -1554,7 +1554,7 @@ DEF:xxx={rrd1}:MemVmmemctlTtNon:AVERAGE \
LINE2:xxx#0000ff:"MemVmmemctlTtNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.MemZeroNon.name=MemZeroNon
report.vmware4.MemZeroNon.columns=MemZeroNon
@@ -1565,7 +1565,7 @@ DEF:xxx={rrd1}:MemZeroNon:AVERAGE \
LINE2:xxx#0000ff:"MemZeroNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetReceivedAvg.name=NetReceivedAvg
report.vmware4.NetReceivedAvg.columns=NetReceivedAvg
@@ -1576,7 +1576,7 @@ DEF:xxx={rrd1}:NetReceivedAvg:AVERAGE \
LINE2:xxx#0000ff:"NetReceivedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetUsageNon.name=NetUsageNon
report.vmware4.NetUsageNon.columns=NetUsageNon
@@ -1587,7 +1587,7 @@ DEF:xxx={rrd1}:NetUsageNon:AVERAGE \
LINE2:xxx#0000ff:"NetUsageNon" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActav15Lat.name=ResCpuActav15Lat
report.vmware4.ResCpuActav15Lat.columns=ResCpuActav15Lat
@@ -1598,7 +1598,7 @@ DEF:xxx={rrd1}:ResCpuActav15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActav1Lat.name=ResCpuActav1Lat
report.vmware4.ResCpuActav1Lat.columns=ResCpuActav1Lat
@@ -1609,7 +1609,7 @@ DEF:xxx={rrd1}:ResCpuActav1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActav5Lat.name=ResCpuActav5Lat
report.vmware4.ResCpuActav5Lat.columns=ResCpuActav5Lat
@@ -1620,7 +1620,7 @@ DEF:xxx={rrd1}:ResCpuActav5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActav5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActpk15Lat.name=ResCpuActpk15Lat
report.vmware4.ResCpuActpk15Lat.columns=ResCpuActpk15Lat
@@ -1631,7 +1631,7 @@ DEF:xxx={rrd1}:ResCpuActpk15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActpk1Lat.name=ResCpuActpk1Lat
report.vmware4.ResCpuActpk1Lat.columns=ResCpuActpk1Lat
@@ -1642,7 +1642,7 @@ DEF:xxx={rrd1}:ResCpuActpk1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuActpk5Lat.name=ResCpuActpk5Lat
report.vmware4.ResCpuActpk5Lat.columns=ResCpuActpk5Lat
@@ -1653,7 +1653,7 @@ DEF:xxx={rrd1}:ResCpuActpk5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuActpk5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuMaxLd15Lat.name=ResCpuMaxLd15Lat
report.vmware4.ResCpuMaxLd15Lat.columns=ResCpuMaxLd15Lat
@@ -1664,7 +1664,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuMaxLd1Lat.name=ResCpuMaxLd1Lat
report.vmware4.ResCpuMaxLd1Lat.columns=ResCpuMaxLd1Lat
@@ -1675,7 +1675,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuMaxLd5Lat.name=ResCpuMaxLd5Lat
report.vmware4.ResCpuMaxLd5Lat.columns=ResCpuMaxLd5Lat
@@ -1686,7 +1686,7 @@ DEF:xxx={rrd1}:ResCpuMaxLd5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuMaxLd5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunav15Lat.name=ResCpuRunav15Lat
report.vmware4.ResCpuRunav15Lat.columns=ResCpuRunav15Lat
@@ -1697,7 +1697,7 @@ DEF:xxx={rrd1}:ResCpuRunav15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunav1Lat.name=ResCpuRunav1Lat
report.vmware4.ResCpuRunav1Lat.columns=ResCpuRunav1Lat
@@ -1708,7 +1708,7 @@ DEF:xxx={rrd1}:ResCpuRunav1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunav5Lat.name=ResCpuRunav5Lat
report.vmware4.ResCpuRunav5Lat.columns=ResCpuRunav5Lat
@@ -1719,7 +1719,7 @@ DEF:xxx={rrd1}:ResCpuRunav5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunav5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunpk15Lat.name=ResCpuRunpk15Lat
report.vmware4.ResCpuRunpk15Lat.columns=ResCpuRunpk15Lat
@@ -1730,7 +1730,7 @@ DEF:xxx={rrd1}:ResCpuRunpk15Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk15Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunpk1Lat.name=ResCpuRunpk1Lat
report.vmware4.ResCpuRunpk1Lat.columns=ResCpuRunpk1Lat
@@ -1741,7 +1741,7 @@ DEF:xxx={rrd1}:ResCpuRunpk1Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk1Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuRunpk5Lat.name=ResCpuRunpk5Lat
report.vmware4.ResCpuRunpk5Lat.columns=ResCpuRunpk5Lat
@@ -1752,7 +1752,7 @@ DEF:xxx={rrd1}:ResCpuRunpk5Lat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuRunpk5Lat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuSeCtLat.name=ResCpuSeCtLat
report.vmware4.ResCpuSeCtLat.columns=ResCpuSeCtLat
@@ -1763,7 +1763,7 @@ DEF:xxx={rrd1}:ResCpuSeCtLat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuSeCtLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.ResCpuSePeriodLat.name=ResCpuSePeriodLat
report.vmware4.ResCpuSePeriodLat.columns=ResCpuSePeriodLat
@@ -1774,7 +1774,7 @@ DEF:xxx={rrd1}:ResCpuSePeriodLat:AVERAGE \
LINE2:xxx#0000ff:"ResCpuSePeriodLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysHeartbeatSum.name=SysHeartbeatSum
report.vmware4.SysHeartbeatSum.columns=SysHeartbeatSum
@@ -1785,7 +1785,7 @@ DEF:xxx={rrd1}:SysHeartbeatSum:AVERAGE \
LINE2:xxx#0000ff:"SysHeartbeatSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.SysUptimeLat.name=SysUptimeLat
report.vmware4.SysUptimeLat.columns=SysUptimeLat
@@ -1796,7 +1796,7 @@ DEF:xxx={rrd1}:SysUptimeLat:AVERAGE \
LINE2:xxx#0000ff:"SysUptimeLat" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetPacketsRxSum.name=NetPacketsRxSum
report.vmware4.NetPacketsRxSum.columns=NetPacketsRxSum
@@ -1808,7 +1808,7 @@ DEF:xxx={rrd1}:NetPacketsRxSum:AVERAGE \
LINE2:xxx#0000ff:"NetPacketsRxSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetPacketsTxSum.name=NetPacketsTxSum
report.vmware4.NetPacketsTxSum.columns=NetPacketsTxSum
@@ -1820,7 +1820,7 @@ DEF:xxx={rrd1}:NetPacketsTxSum:AVERAGE \
LINE2:xxx#0000ff:"NetPacketsTxSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetReceivedAvg.name=NetReceivedAvg
report.vmware4.NetReceivedAvg.columns=NetReceivedAvg
@@ -1832,7 +1832,7 @@ DEF:xxx={rrd1}:NetReceivedAvg:AVERAGE \
LINE2:xxx#0000ff:"NetReceivedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.NetTransmittedAvg.name=NetTransmittedAvg
report.vmware4.NetTransmittedAvg.columns=NetTransmittedAvg
@@ -1844,7 +1844,7 @@ DEF:xxx={rrd1}:NetTransmittedAvg:AVERAGE \
LINE2:xxx#0000ff:"NetTransmittedAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskBusResetsSum.name=DiskBusResetsSum
report.vmware4.DiskBusResetsSum.columns=DiskBusResetsSum
@@ -1856,7 +1856,7 @@ DEF:xxx={rrd1}:DiskBusResetsSum:AVERAGE \
LINE2:xxx#0000ff:"DiskBusResetsSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskCsAdSum.name=DiskCsAdSum
report.vmware4.DiskCsAdSum.columns=DiskCsAdSum
@@ -1868,7 +1868,7 @@ DEF:xxx={rrd1}:DiskCsAdSum:AVERAGE \
LINE2:xxx#0000ff:"DiskCsAdSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskCsSum.name=DiskCsSum
report.vmware4.DiskCsSum.columns=DiskCsSum
@@ -1880,7 +1880,7 @@ DEF:xxx={rrd1}:DiskCsSum:AVERAGE \
LINE2:xxx#0000ff:"DiskCsSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskNrRdSum.name=DiskNrRdSum
report.vmware4.DiskNrRdSum.columns=DiskNrRdSum
@@ -1892,7 +1892,7 @@ DEF:xxx={rrd1}:DiskNrRdSum:AVERAGE \
LINE2:xxx#0000ff:"DiskNrRdSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskNrWeSum.name=DiskNrWeSum
report.vmware4.DiskNrWeSum.columns=DiskNrWeSum
@@ -1904,7 +1904,7 @@ DEF:xxx={rrd1}:DiskNrWeSum:AVERAGE \
LINE2:xxx#0000ff:"DiskNrWeSum" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskRdAvg.name=DiskRdAvg
report.vmware4.DiskRdAvg.columns=DiskRdAvg
@@ -1916,7 +1916,7 @@ DEF:xxx={rrd1}:DiskRdAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskRdAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
report.vmware4.DiskWeAvg.name=DiskWeAvg
report.vmware4.DiskWeAvg.columns=DiskWeAvg
@@ -1928,5 +1928,5 @@ DEF:xxx={rrd1}:DiskWeAvg:AVERAGE \
LINE2:xxx#0000ff:"DiskWeAvg" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
-GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
diff --git a/snmp-graph.properties.d/vmware6-graph-simple.properties b/snmp-graph.properties.d/vmware6-graph-simple.properties
new file mode 100644
index 0000000..6268027
--- /dev/null
+++ b/snmp-graph.properties.d/vmware6-graph-simple.properties
@@ -0,0 +1,2644 @@
+reports=vmware6.VrtDiskLeSsLat, \
+vmware6.VrtDiskMmSsLat, \
+vmware6.VrtDiskNrRdAdAvg, \
+vmware6.VrtDiskNrWeAdAvg, \
+vmware6.VrtDiskRdAvg, \
+vmware6.VrtDiskRdIOSizeLat, \
+vmware6.VrtDiskRdLdMcLat, \
+vmware6.VrtDiskRdLyUSLat, \
+vmware6.VrtDiskRdOIOLat, \
+vmware6.VrtDiskSlSsLat, \
+vmware6.VrtDiskTlRdLyAvg, \
+vmware6.VrtDiskTlWeLyAvg, \
+vmware6.VrtDiskWeAvg, \
+vmware6.VrtDiskWeIOSizeLat, \
+vmware6.VrtDiskWeLdMcLat, \
+vmware6.VrtDiskWeLyUSLat, \
+vmware6.VrtDiskWeOIOLat, \
+vmware6.CpuDemandAvg, \
+vmware6.CpuDmdEntRatioLat, \
+vmware6.CpuEntitlementLat, \
+vmware6.CpuLyAvg, \
+vmware6.CpuOverlapSum, \
+vmware6.CpuRdinessAvg, \
+vmware6.CpuSpwaitSum, \
+vmware6.CpuUsageAvg, \
+vmware6.CpuUsagemhzAvg, \
+vmware6.CpuUsedSum, \
+vmware6.CpuWaitSum, \
+vmware6.DaStMaxTlLyLat, \
+vmware6.DiskMaxTlLyLat, \
+vmware6.DiskRdAvg, \
+vmware6.DiskUsageAvg, \
+vmware6.DiskWeAvg, \
+vmware6.MemAeAvg, \
+vmware6.MemAeWeAvg, \
+vmware6.MemCdAvg, \
+vmware6.MemCnReAvg, \
+vmware6.MemCompressedAvg, \
+vmware6.MemDnReAvg, \
+vmware6.MemEntitlementAvg, \
+vmware6.MemGrantedAvg, \
+vmware6.MemLlSpInReAvg, \
+vmware6.MemLlSpOutReAvg, \
+vmware6.MemLlSpUsedAvg, \
+vmware6.MemLyAvg, \
+vmware6.MemOdAvg, \
+vmware6.MemOdMaxAvg, \
+vmware6.MemOdTdAvg, \
+vmware6.MemSharedAvg, \
+vmware6.MemSpTtAvg, \
+vmware6.MemSpinAvg, \
+vmware6.MemSpinReAvg, \
+vmware6.MemSpoutAvg, \
+vmware6.MemSpoutReAvg, \
+vmware6.MemSppedAvg, \
+vmware6.MemUsageAvg, \
+vmware6.MemVmmemctlAvg, \
+vmware6.MemVmmemctlTtAvg, \
+vmware6.MemZeroAvg, \
+vmware6.MemZipSavedLat, \
+vmware6.MemZippedLat, \
+vmware6.NetBroadcastRxSum, \
+vmware6.NetBroadcastTxSum, \
+vmware6.NetDroppedRxSum, \
+vmware6.NetDroppedTxSum, \
+vmware6.NetPacketsTxSum, \
+vmware6.NetReceivedAvg, \
+vmware6.NetTransmittedAvg, \
+vmware6.PowerEnergySum, \
+vmware6.PowerPowerAvg, \
+vmware6.ResCpuActav15Lat, \
+vmware6.ResCpuActav1Lat, \
+vmware6.ResCpuActav5Lat, \
+vmware6.ResCpuActpk15Lat, \
+vmware6.ResCpuActpk1Lat, \
+vmware6.ResCpuActpk5Lat, \
+vmware6.ResCpuMaxLd15Lat, \
+vmware6.ResCpuMaxLd1Lat, \
+vmware6.ResCpuMaxLd5Lat, \
+vmware6.ResCpuRunav15Lat, \
+vmware6.ResCpuRunav1Lat, \
+vmware6.ResCpuRunav5Lat, \
+vmware6.ResCpuRunpk15Lat, \
+vmware6.ResCpuRunpk1Lat, \
+vmware6.ResCpuRunpk5Lat, \
+vmware6.ResCpuSeCtLat, \
+vmware6.ResCpuSePeriodLat, \
+vmware6.SysHeartbeatLat, \
+vmware6.SysOsUpTeLat, \
+vmware6.SysUpTeLat, \
+vmware6.CpuCostopSum, \
+vmware6.CpuIdleSum, \
+vmware6.CpuMaxLdSum, \
+vmware6.CpuRdySum, \
+vmware6.CpuRunSum, \
+vmware6.CpuSystemSum, \
+vmware6.NetBytesRxAvg, \
+vmware6.NetBytesTxAvg, \
+vmware6.NetMulticastRxSum, \
+vmware6.NetMulticastTxSum, \
+vmware6.NetPacketsRxSum, \
+vmware6.NetUsageAvg, \
+vmware6.DiskBusResetsSum, \
+vmware6.DiskCsAdAvg, \
+vmware6.DiskCsAdSum, \
+vmware6.DiskCsSum, \
+vmware6.DiskNrRdAdAvg, \
+vmware6.DiskNrRdSum, \
+vmware6.DiskNrWeAdAvg, \
+vmware6.DiskNrWeSum, \
+vmware6.DaStNrRdAdAvg, \
+vmware6.DaStNrWeAdAvg, \
+vmware6.DaStRdAvg, \
+vmware6.DaStTlRdLyAvg, \
+vmware6.DaStTlWeLyAvg, \
+vmware6.DaStWeAvg, \
+vmware6.StAdptrCsAdAvg, \
+vmware6.StAdptrNrRdAdAvg, \
+vmware6.StAdptrNrWeAdAvg, \
+vmware6.StAdptrRdAvg, \
+vmware6.StAdptrTlRdLyAvg, \
+vmware6.StAdptrTlWeLyAvg, \
+vmware6.StAdptrWeAvg, \
+vmware6.StPthCsAdAvg, \
+vmware6.StPthNrRdAdAvg, \
+vmware6.StPthNrWeAdAvg, \
+vmware6.StPthRdAvg, \
+vmware6.StPthTlRdLyAvg, \
+vmware6.StPthTlWeLyAvg, \
+vmware6.StPthWeAvg, \
+vmware6.CpuRdCyAvg, \
+vmware6.CpuTlCyAvg, \
+vmware6.HbrHbrNetRxAvg, \
+vmware6.HbrHbrNetTxAvg, \
+vmware6.HbrHbrNumVmsAvg, \
+vmware6.MemHeapAvg, \
+vmware6.MemHeapfreeAvg, \
+vmware6.MemLlSpInAvg, \
+vmware6.MemLlSpOutAvg, \
+vmware6.MemLowfreeTdAvg, \
+vmware6.MemRdCyAvg, \
+vmware6.MemSharedcommonAvg, \
+vmware6.MemSpusedAvg, \
+vmware6.MemStateLat, \
+vmware6.MemSysUsageAvg, \
+vmware6.MemTlCyAvg, \
+vmware6.MemUdAvg, \
+vmware6.MemVmfs.pbc.OdLat, \
+vmware6.MemVmfsPbcCpMsRtiLt, \
+vmware6.MemVmfs.pbc.sizeLat, \
+vmware6.MemVmfsPbcSizMaxLat, \
+vmware6.MemVmfsPbcWrkSetLat, \
+vmware6.MemVmfsPbcWrkStMxLt, \
+vmware6.PowerPowerCapAvg, \
+vmware6.StAdptrMaxTlLyLat, \
+vmware6.StPthMaxTlLyLat, \
+vmware6.CpuCoreUnAvg, \
+vmware6.CpuUnAvg, \
+vmware6.SysReCpuAcMinLat, \
+vmware6.SysReCpuAcSsLat, \
+vmware6.SysReCpuAct1Lat, \
+vmware6.SysReCpuAct5Lat, \
+vmware6.SysReCpuMaxLd1Lat, \
+vmware6.SysReCpuMaxLd5Lat, \
+vmware6.SysReCpuRun1Lat, \
+vmware6.SysReCpuRun5Lat, \
+vmware6.SysReCpuUsageAvg, \
+vmware6.SysReFdUsageLat, \
+vmware6.SysReMemAcMaxLat, \
+vmware6.SysReMemAcMinLat, \
+vmware6.SysReMemAcSsLat, \
+vmware6.SysReMemCdLat, \
+vmware6.SysReMemCowLat, \
+vmware6.SysReMemMappedLat, \
+vmware6.SysReMemOdLat, \
+vmware6.SysReMemSharedLat, \
+vmware6.SysReMemSppedLat, \
+vmware6.SysReMemTdLat, \
+vmware6.SysReMemZeroLat, \
+vmware6.NetErrorsRxSum, \
+vmware6.NetErrorsTxSum, \
+vmware6.NetUnknownPsSum, \
+vmware6.DiskDeLyAvg, \
+vmware6.DiskDeRdLyAvg, \
+vmware6.DiskDeWeLyAvg, \
+vmware6.DiskKlLyAvg, \
+vmware6.DiskKlRdLyAvg, \
+vmware6.DiskKlWeLyAvg, \
+vmware6.DiskMaxQeDhAvg, \
+vmware6.DiskQeLyAvg, \
+vmware6.DiskQeRdLyAvg, \
+vmware6.DiskQeWeLyAvg, \
+vmware6.DiskTlLyAvg, \
+vmware6.DiskTlRdLyAvg, \
+vmware6.DiskTlWeLyAvg, \
+vmware6.vflModNumAeVMDKsLat, \
+vmware6.DaStDeIopsAvg, \
+vmware6.DaStDeMaxQeDhLat, \
+vmware6.DaStDeNlRdLyLat, \
+vmware6.DaStDeNlWeLyLat, \
+vmware6.DaStDeRdBytesLat, \
+vmware6.DaStDeRdIopsLat, \
+vmware6.DaStDeRdLdMcLat, \
+vmware6.DaStDeRdOIOLat, \
+vmware6.DaStDeVMOdLyLat, \
+vmware6.DaStDeWeBytesLat, \
+vmware6.DaStDeWeIopsLat, \
+vmware6.DaStDeWeLdMcLat, \
+vmware6.DaStDeWeOIOLat, \
+vmware6.DaStSiocAeTePeAvg, \
+vmware6.DaStSizeNdDeLyAvg
+
+report.vmware6.VrtDiskLeSsLat.name=vmware6.virtualDisk.largeSeeks.latest
+report.vmware6.VrtDiskLeSsLat.columns=VrtDiskLeSsLat
+report.vmware6.VrtDiskLeSsLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskLeSsLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskLeSsLat.command=--title="VMware6 virtualDisk.largeSeeks.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskLeSsLat" \
+DEF:xxx={rrd1}:VrtDiskLeSsLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskLeSsLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
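
The new vmware6-graph-simple.properties is organized as a reports= index naming every graph, followed by one report.<id>.* group per graph whose command holds rrdtool graph arguments. At render time OpenNMS substitutes {rrd1} with the path of the RRD/JRB file for the resource, and for instance-scoped resource types such as vmware6VrtDisk it fills {vmware6VrtDiskName} from the string property listed in propertiesValues; note that the \\: and \\n escapes in the properties file unescape to \: and \n before reaching rrdtool. Expanded by hand, the VrtDiskLeSsLat report above corresponds roughly to the following standalone invocation, where the output file name, RRD path, and disk label are illustrative assumptions, not values from the diff:

rrdtool graph VrtDiskLeSsLat.png \
  --title="VMware6 virtualDisk.largeSeeks.latest scsi0:0" \
  --vertical-label="VrtDiskLeSsLat" \
  DEF:xxx=/opt/opennms/share/rrd/snmp/1/vmware6VrtDisk/scsi0_0/VrtDiskLeSsLat.rrd:VrtDiskLeSsLat:AVERAGE \
  LINE2:xxx#0000ff:"VrtDiskLeSsLat" \
  GPRINT:xxx:AVERAGE:"Avg \: %8.2lf %s" \
  GPRINT:xxx:MIN:"Min \: %8.2lf %s" \
  GPRINT:xxx:MAX:"Max \: %8.2lf %s\n"
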
+report.vmware6.VrtDiskMmSsLat.name=vmware6.virtualDisk.mediumSeeks.latest
+report.vmware6.VrtDiskMmSsLat.columns=VrtDiskMmSsLat
+report.vmware6.VrtDiskMmSsLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskMmSsLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskMmSsLat.command=--title="VMware6 virtualDisk.mediumSeeks.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskMmSsLat" \
+DEF:xxx={rrd1}:VrtDiskMmSsLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskMmSsLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskNrRdAdAvg.name=vmware6.virtualDisk.numberReadAveraged.average
+report.vmware6.VrtDiskNrRdAdAvg.columns=VrtDiskNrRdAdAvg
+report.vmware6.VrtDiskNrRdAdAvg.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskNrRdAdAvg.type=vmware6VrtDisk
+report.vmware6.VrtDiskNrRdAdAvg.command=--title="VMware6 virtualDisk.numberReadAveraged.average {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskNrRdAdAvg" \
+DEF:xxx={rrd1}:VrtDiskNrRdAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskNrRdAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskNrWeAdAvg.name=vmware6.virtualDisk.numberWriteAveraged.average
+report.vmware6.VrtDiskNrWeAdAvg.columns=VrtDiskNrWeAdAvg
+report.vmware6.VrtDiskNrWeAdAvg.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskNrWeAdAvg.type=vmware6VrtDisk
+report.vmware6.VrtDiskNrWeAdAvg.command=--title="VMware6 virtualDisk.numberWriteAveraged.average {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskNrWeAdAvg" \
+DEF:xxx={rrd1}:VrtDiskNrWeAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskNrWeAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskRdAvg.name=vmware6.virtualDisk.read.average
+report.vmware6.VrtDiskRdAvg.columns=VrtDiskRdAvg
+report.vmware6.VrtDiskRdAvg.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskRdAvg.type=vmware6VrtDisk
+report.vmware6.VrtDiskRdAvg.command=--title="VMware6 virtualDisk.read.average {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskRdAvg" \
+DEF:xxx={rrd1}:VrtDiskRdAvg:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskRdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskRdIOSizeLat.name=vmware6.virtualDisk.readIOSize.latest
+report.vmware6.VrtDiskRdIOSizeLat.columns=VrtDiskRdIOSizeLat
+report.vmware6.VrtDiskRdIOSizeLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskRdIOSizeLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskRdIOSizeLat.command=--title="VMware6 virtualDisk.readIOSize.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskRdIOSizeLat" \
+DEF:xxx={rrd1}:VrtDiskRdIOSizeLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskRdIOSizeLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskRdLdMcLat.name=vmware6.virtualDisk.readLoadMetric.latest
+report.vmware6.VrtDiskRdLdMcLat.columns=VrtDiskRdLdMcLat
+report.vmware6.VrtDiskRdLdMcLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskRdLdMcLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskRdLdMcLat.command=--title="VMware6 virtualDisk.readLoadMetric.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskRdLdMcLat" \
+DEF:xxx={rrd1}:VrtDiskRdLdMcLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskRdLdMcLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskRdLyUSLat.name=vmware6.virtualDisk.readLatencyUS.latest
+report.vmware6.VrtDiskRdLyUSLat.columns=VrtDiskRdLyUSLat
+report.vmware6.VrtDiskRdLyUSLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskRdLyUSLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskRdLyUSLat.command=--title="VMware6 virtualDisk.readLatencyUS.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskRdLyUSLat" \
+DEF:xxx={rrd1}:VrtDiskRdLyUSLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskRdLyUSLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskRdOIOLat.name=vmware6.virtualDisk.readOIO.latest
+report.vmware6.VrtDiskRdOIOLat.columns=VrtDiskRdOIOLat
+report.vmware6.VrtDiskRdOIOLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskRdOIOLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskRdOIOLat.command=--title="VMware6 virtualDisk.readOIO.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskRdOIOLat" \
+DEF:xxx={rrd1}:VrtDiskRdOIOLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskRdOIOLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskSlSsLat.name=vmware6.virtualDisk.smallSeeks.latest
+report.vmware6.VrtDiskSlSsLat.columns=VrtDiskSlSsLat
+report.vmware6.VrtDiskSlSsLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskSlSsLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskSlSsLat.command=--title="VMware6 virtualDisk.smallSeeks.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskSlSsLat" \
+DEF:xxx={rrd1}:VrtDiskSlSsLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskSlSsLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskTlRdLyAvg.name=vmware6.virtualDisk.totalReadLatency.average
+report.vmware6.VrtDiskTlRdLyAvg.columns=VrtDiskTlRdLyAvg
+report.vmware6.VrtDiskTlRdLyAvg.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskTlRdLyAvg.type=vmware6VrtDisk
+report.vmware6.VrtDiskTlRdLyAvg.command=--title="VMware6 virtualDisk.totalReadLatency.average {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskTlRdLyAvg" \
+DEF:xxx={rrd1}:VrtDiskTlRdLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskTlRdLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskTlWeLyAvg.name=vmware6.virtualDisk.totalWriteLatency.average
+report.vmware6.VrtDiskTlWeLyAvg.columns=VrtDiskTlWeLyAvg
+report.vmware6.VrtDiskTlWeLyAvg.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskTlWeLyAvg.type=vmware6VrtDisk
+report.vmware6.VrtDiskTlWeLyAvg.command=--title="VMware6 virtualDisk.totalWriteLatency.average {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskTlWeLyAvg" \
+DEF:xxx={rrd1}:VrtDiskTlWeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskTlWeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskWeAvg.name=vmware6.virtualDisk.write.average
+report.vmware6.VrtDiskWeAvg.columns=VrtDiskWeAvg
+report.vmware6.VrtDiskWeAvg.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskWeAvg.type=vmware6VrtDisk
+report.vmware6.VrtDiskWeAvg.command=--title="VMware6 virtualDisk.write.average {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskWeAvg" \
+DEF:xxx={rrd1}:VrtDiskWeAvg:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskWeAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskWeIOSizeLat.name=vmware6.virtualDisk.writeIOSize.latest
+report.vmware6.VrtDiskWeIOSizeLat.columns=VrtDiskWeIOSizeLat
+report.vmware6.VrtDiskWeIOSizeLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskWeIOSizeLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskWeIOSizeLat.command=--title="VMware6 virtualDisk.writeIOSize.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskWeIOSizeLat" \
+DEF:xxx={rrd1}:VrtDiskWeIOSizeLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskWeIOSizeLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskWeLdMcLat.name=vmware6.virtualDisk.writeLoadMetric.latest
+report.vmware6.VrtDiskWeLdMcLat.columns=VrtDiskWeLdMcLat
+report.vmware6.VrtDiskWeLdMcLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskWeLdMcLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskWeLdMcLat.command=--title="VMware6 virtualDisk.writeLoadMetric.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskWeLdMcLat" \
+DEF:xxx={rrd1}:VrtDiskWeLdMcLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskWeLdMcLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskWeLyUSLat.name=vmware6.virtualDisk.writeLatencyUS.latest
+report.vmware6.VrtDiskWeLyUSLat.columns=VrtDiskWeLyUSLat
+report.vmware6.VrtDiskWeLyUSLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskWeLyUSLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskWeLyUSLat.command=--title="VMware6 virtualDisk.writeLatencyUS.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskWeLyUSLat" \
+DEF:xxx={rrd1}:VrtDiskWeLyUSLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskWeLyUSLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.VrtDiskWeOIOLat.name=vmware6.virtualDisk.writeOIO.latest
+report.vmware6.VrtDiskWeOIOLat.columns=VrtDiskWeOIOLat
+report.vmware6.VrtDiskWeOIOLat.propertiesValues=vmware6VrtDiskName
+report.vmware6.VrtDiskWeOIOLat.type=vmware6VrtDisk
+report.vmware6.VrtDiskWeOIOLat.command=--title="VMware6 virtualDisk.writeOIO.latest {vmware6VrtDiskName}" \
+--vertical-label="VrtDiskWeOIOLat" \
+DEF:xxx={rrd1}:VrtDiskWeOIOLat:AVERAGE \
+LINE2:xxx#0000ff:"VrtDiskWeOIOLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuDemandAvg.name=vmware6.cpu.demand.average
+report.vmware6.CpuDemandAvg.columns=CpuDemandAvg
+report.vmware6.CpuDemandAvg.type=nodeSnmp
+report.vmware6.CpuDemandAvg.command=--title="VMware6 cpu.demand.average" \
+--vertical-label="CpuDemandAvg" \
+DEF:xxx={rrd1}:CpuDemandAvg:AVERAGE \
+LINE2:xxx#0000ff:"CpuDemandAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuDmdEntRatioLat.name=vmware6.cpu.demandEntitlementRatio.latest
+report.vmware6.CpuDmdEntRatioLat.columns=CpuDmdEntRatioLat
+report.vmware6.CpuDmdEntRatioLat.type=nodeSnmp
+report.vmware6.CpuDmdEntRatioLat.command=--title="VMware6 cpu.demandEntitlementRatio.latest" \
+--vertical-label="CpuDmdEntRatioLat" \
+DEF:xxx={rrd1}:CpuDmdEntRatioLat:AVERAGE \
+LINE2:xxx#0000ff:"CpuDmdEntRatioLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuEntitlementLat.name=vmware6.cpu.entitlement.latest
+report.vmware6.CpuEntitlementLat.columns=CpuEntitlementLat
+report.vmware6.CpuEntitlementLat.type=nodeSnmp
+report.vmware6.CpuEntitlementLat.command=--title="VMware6 cpu.entitlement.latest" \
+--vertical-label="CpuEntitlementLat" \
+DEF:xxx={rrd1}:CpuEntitlementLat:AVERAGE \
+LINE2:xxx#0000ff:"CpuEntitlementLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuLyAvg.name=vmware6.cpu.latency.average
+report.vmware6.CpuLyAvg.columns=CpuLyAvg
+report.vmware6.CpuLyAvg.type=nodeSnmp
+report.vmware6.CpuLyAvg.command=--title="VMware6 cpu.latency.average" \
+--vertical-label="CpuLyAvg" \
+DEF:xxx={rrd1}:CpuLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"CpuLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuOverlapSum.name=vmware6.cpu.overlap.summation
+report.vmware6.CpuOverlapSum.columns=CpuOverlapSum
+report.vmware6.CpuOverlapSum.type=nodeSnmp
+report.vmware6.CpuOverlapSum.command=--title="VMware6 cpu.overlap.summation" \
+--vertical-label="CpuOverlapSum" \
+DEF:xxx={rrd1}:CpuOverlapSum:AVERAGE \
+LINE2:xxx#0000ff:"CpuOverlapSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuRdinessAvg.name=vmware6.cpu.readiness.average
+report.vmware6.CpuRdinessAvg.columns=CpuRdinessAvg
+report.vmware6.CpuRdinessAvg.type=nodeSnmp
+report.vmware6.CpuRdinessAvg.command=--title="VMware6 cpu.readiness.average" \
+--vertical-label="CpuRdinessAvg" \
+DEF:xxx={rrd1}:CpuRdinessAvg:AVERAGE \
+LINE2:xxx#0000ff:"CpuRdinessAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuSpwaitSum.name=vmware6.cpu.swapwait.summation
+report.vmware6.CpuSpwaitSum.columns=CpuSpwaitSum
+report.vmware6.CpuSpwaitSum.type=nodeSnmp
+report.vmware6.CpuSpwaitSum.command=--title="VMware6 cpu.swapwait.summation" \
+--vertical-label="CpuSpwaitSum" \
+DEF:xxx={rrd1}:CpuSpwaitSum:AVERAGE \
+LINE2:xxx#0000ff:"CpuSpwaitSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuUsageAvg.name=vmware6.cpu.usage.average
+report.vmware6.CpuUsageAvg.columns=CpuUsageAvg
+report.vmware6.CpuUsageAvg.type=nodeSnmp
+report.vmware6.CpuUsageAvg.command=--title="VMware6 cpu.usage.average" \
+--vertical-label="CpuUsageAvg" \
+DEF:xxx={rrd1}:CpuUsageAvg:AVERAGE \
+LINE2:xxx#0000ff:"CpuUsageAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuUsagemhzAvg.name=vmware6.cpu.usagemhz.average
+report.vmware6.CpuUsagemhzAvg.columns=CpuUsagemhzAvg
+report.vmware6.CpuUsagemhzAvg.type=nodeSnmp
+report.vmware6.CpuUsagemhzAvg.command=--title="VMware6 cpu.usagemhz.average" \
+--vertical-label="CpuUsagemhzAvg" \
+DEF:xxx={rrd1}:CpuUsagemhzAvg:AVERAGE \
+LINE2:xxx#0000ff:"CpuUsagemhzAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuUsedSum.name=vmware6.cpu.used.summation
+report.vmware6.CpuUsedSum.columns=CpuUsedSum
+report.vmware6.CpuUsedSum.type=nodeSnmp
+report.vmware6.CpuUsedSum.command=--title="VMware6 cpu.used.summation" \
+--vertical-label="CpuUsedSum" \
+DEF:xxx={rrd1}:CpuUsedSum:AVERAGE \
+LINE2:xxx#0000ff:"CpuUsedSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuWaitSum.name=vmware6.cpu.wait.summation
+report.vmware6.CpuWaitSum.columns=CpuWaitSum
+report.vmware6.CpuWaitSum.type=nodeSnmp
+report.vmware6.CpuWaitSum.command=--title="VMware6 cpu.wait.summation" \
+--vertical-label="CpuWaitSum" \
+DEF:xxx={rrd1}:CpuWaitSum:AVERAGE \
+LINE2:xxx#0000ff:"CpuWaitSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStMaxTlLyLat.name=vmware6.datastore.maxTotalLatency.latest
+report.vmware6.DaStMaxTlLyLat.columns=DaStMaxTlLyLat
+report.vmware6.DaStMaxTlLyLat.type=nodeSnmp
+report.vmware6.DaStMaxTlLyLat.command=--title="VMware6 datastore.maxTotalLatency.latest" \
+--vertical-label="DaStMaxTlLyLat" \
+DEF:xxx={rrd1}:DaStMaxTlLyLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStMaxTlLyLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskMaxTlLyLat.name=vmware6.disk.maxTotalLatency.latest
+report.vmware6.DiskMaxTlLyLat.columns=DiskMaxTlLyLat
+report.vmware6.DiskMaxTlLyLat.type=nodeSnmp
+report.vmware6.DiskMaxTlLyLat.command=--title="VMware6 disk.maxTotalLatency.latest" \
+--vertical-label="DiskMaxTlLyLat" \
+DEF:xxx={rrd1}:DiskMaxTlLyLat:AVERAGE \
+LINE2:xxx#0000ff:"DiskMaxTlLyLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskRdAvg.name=vmware6.disk.read.average
+report.vmware6.DiskRdAvg.columns=DiskRdAvg
+report.vmware6.DiskRdAvg.type=nodeSnmp
+report.vmware6.DiskRdAvg.command=--title="VMware6 disk.read.average" \
+--vertical-label="DiskRdAvg" \
+DEF:xxx={rrd1}:DiskRdAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskRdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskUsageAvg.name=vmware6.disk.usage.average
+report.vmware6.DiskUsageAvg.columns=DiskUsageAvg
+report.vmware6.DiskUsageAvg.type=nodeSnmp
+report.vmware6.DiskUsageAvg.command=--title="VMware6 disk.usage.average" \
+--vertical-label="DiskUsageAvg" \
+DEF:xxx={rrd1}:DiskUsageAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskUsageAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskWeAvg.name=vmware6.disk.write.average
+report.vmware6.DiskWeAvg.columns=DiskWeAvg
+report.vmware6.DiskWeAvg.type=nodeSnmp
+report.vmware6.DiskWeAvg.command=--title="VMware6 disk.write.average" \
+--vertical-label="DiskWeAvg" \
+DEF:xxx={rrd1}:DiskWeAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskWeAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemAeAvg.name=vmware6.mem.active.average
+report.vmware6.MemAeAvg.columns=MemAeAvg
+report.vmware6.MemAeAvg.type=nodeSnmp
+report.vmware6.MemAeAvg.command=--title="VMware6 mem.active.average" \
+--vertical-label="MemAeAvg" \
+DEF:xxx={rrd1}:MemAeAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemAeAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemAeWeAvg.name=vmware6.mem.activewrite.average
+report.vmware6.MemAeWeAvg.columns=MemAeWeAvg
+report.vmware6.MemAeWeAvg.type=nodeSnmp
+report.vmware6.MemAeWeAvg.command=--title="VMware6 mem.activewrite.average" \
+--vertical-label="MemAeWeAvg" \
+DEF:xxx={rrd1}:MemAeWeAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemAeWeAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemCdAvg.name=vmware6.mem.consumed.average
+report.vmware6.MemCdAvg.columns=MemCdAvg
+report.vmware6.MemCdAvg.type=nodeSnmp
+report.vmware6.MemCdAvg.command=--title="VMware6 mem.consumed.average" \
+--vertical-label="MemCdAvg" \
+DEF:xxx={rrd1}:MemCdAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemCdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemCnReAvg.name=vmware6.mem.compressionRate.average
+report.vmware6.MemCnReAvg.columns=MemCnReAvg
+report.vmware6.MemCnReAvg.type=nodeSnmp
+report.vmware6.MemCnReAvg.command=--title="VMware6 mem.compressionRate.average" \
+--vertical-label="MemCnReAvg" \
+DEF:xxx={rrd1}:MemCnReAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemCnReAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemCompressedAvg.name=vmware6.mem.compressed.average
+report.vmware6.MemCompressedAvg.columns=MemCompressedAvg
+report.vmware6.MemCompressedAvg.type=nodeSnmp
+report.vmware6.MemCompressedAvg.command=--title="VMware6 mem.compressed.average" \
+--vertical-label="MemCompressedAvg" \
+DEF:xxx={rrd1}:MemCompressedAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemCompressedAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemDnReAvg.name=vmware6.mem.decompressionRate.average
+report.vmware6.MemDnReAvg.columns=MemDnReAvg
+report.vmware6.MemDnReAvg.type=nodeSnmp
+report.vmware6.MemDnReAvg.command=--title="VMware6 mem.decompressionRate.average" \
+--vertical-label="MemDnReAvg" \
+DEF:xxx={rrd1}:MemDnReAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemDnReAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemEntitlementAvg.name=vmware6.mem.entitlement.average
+report.vmware6.MemEntitlementAvg.columns=MemEntitlementAvg
+report.vmware6.MemEntitlementAvg.type=nodeSnmp
+report.vmware6.MemEntitlementAvg.command=--title="VMware6 mem.entitlement.average" \
+--vertical-label="MemEntitlementAvg" \
+DEF:xxx={rrd1}:MemEntitlementAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemEntitlementAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemGrantedAvg.name=vmware6.mem.granted.average
+report.vmware6.MemGrantedAvg.columns=MemGrantedAvg
+report.vmware6.MemGrantedAvg.type=nodeSnmp
+report.vmware6.MemGrantedAvg.command=--title="VMware6 mem.granted.average" \
+--vertical-label="MemGrantedAvg" \
+DEF:xxx={rrd1}:MemGrantedAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemGrantedAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemLlSpInReAvg.name=vmware6.mem.llSwapInRate.average
+report.vmware6.MemLlSpInReAvg.columns=MemLlSpInReAvg
+report.vmware6.MemLlSpInReAvg.type=nodeSnmp
+report.vmware6.MemLlSpInReAvg.command=--title="VMware6 mem.llSwapInRate.average" \
+--vertical-label="MemLlSpInReAvg" \
+DEF:xxx={rrd1}:MemLlSpInReAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemLlSpInReAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemLlSpOutReAvg.name=vmware6.mem.llSwapOutRate.average
+report.vmware6.MemLlSpOutReAvg.columns=MemLlSpOutReAvg
+report.vmware6.MemLlSpOutReAvg.type=nodeSnmp
+report.vmware6.MemLlSpOutReAvg.command=--title="VMware6 mem.llSwapOutRate.average" \
+--vertical-label="MemLlSpOutReAvg" \
+DEF:xxx={rrd1}:MemLlSpOutReAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemLlSpOutReAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemLlSpUsedAvg.name=vmware6.mem.llSwapUsed.average
+report.vmware6.MemLlSpUsedAvg.columns=MemLlSpUsedAvg
+report.vmware6.MemLlSpUsedAvg.type=nodeSnmp
+report.vmware6.MemLlSpUsedAvg.command=--title="VMware6 mem.llSwapUsed.average" \
+--vertical-label="MemLlSpUsedAvg" \
+DEF:xxx={rrd1}:MemLlSpUsedAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemLlSpUsedAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemLyAvg.name=vmware6.mem.latency.average
+report.vmware6.MemLyAvg.columns=MemLyAvg
+report.vmware6.MemLyAvg.type=nodeSnmp
+report.vmware6.MemLyAvg.command=--title="VMware6 mem.latency.average" \
+--vertical-label="MemLyAvg" \
+DEF:xxx={rrd1}:MemLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemOdAvg.name=vmware6.mem.overhead.average
+report.vmware6.MemOdAvg.columns=MemOdAvg
+report.vmware6.MemOdAvg.type=nodeSnmp
+report.vmware6.MemOdAvg.command=--title="VMware6 mem.overhead.average" \
+--vertical-label="MemOdAvg" \
+DEF:xxx={rrd1}:MemOdAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemOdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemOdMaxAvg.name=vmware6.mem.overheadMax.average
+report.vmware6.MemOdMaxAvg.columns=MemOdMaxAvg
+report.vmware6.MemOdMaxAvg.type=nodeSnmp
+report.vmware6.MemOdMaxAvg.command=--title="VMware6 mem.overheadMax.average" \
+--vertical-label="MemOdMaxAvg" \
+DEF:xxx={rrd1}:MemOdMaxAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemOdMaxAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemOdTdAvg.name=vmware6.mem.overheadTouched.average
+report.vmware6.MemOdTdAvg.columns=MemOdTdAvg
+report.vmware6.MemOdTdAvg.type=nodeSnmp
+report.vmware6.MemOdTdAvg.command=--title="VMware6 mem.overheadTouched.average" \
+--vertical-label="MemOdTdAvg" \
+DEF:xxx={rrd1}:MemOdTdAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemOdTdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemSharedAvg.name=vmware6.mem.shared.average
+report.vmware6.MemSharedAvg.columns=MemSharedAvg
+report.vmware6.MemSharedAvg.type=nodeSnmp
+report.vmware6.MemSharedAvg.command=--title="VMware6 mem.shared.average" \
+--vertical-label="MemSharedAvg" \
+DEF:xxx={rrd1}:MemSharedAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemSharedAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemSpTtAvg.name=vmware6.mem.swaptarget.average
+report.vmware6.MemSpTtAvg.columns=MemSpTtAvg
+report.vmware6.MemSpTtAvg.type=nodeSnmp
+report.vmware6.MemSpTtAvg.command=--title="VMware6 mem.swaptarget.average" \
+--vertical-label="MemSpTtAvg" \
+DEF:xxx={rrd1}:MemSpTtAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemSpTtAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemSpinAvg.name=vmware6.mem.swapin.average
+report.vmware6.MemSpinAvg.columns=MemSpinAvg
+report.vmware6.MemSpinAvg.type=nodeSnmp
+report.vmware6.MemSpinAvg.command=--title="VMware6 mem.swapin.average" \
+--vertical-label="MemSpinAvg" \
+DEF:xxx={rrd1}:MemSpinAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemSpinAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemSpinReAvg.name=vmware6.mem.swapinRate.average
+report.vmware6.MemSpinReAvg.columns=MemSpinReAvg
+report.vmware6.MemSpinReAvg.type=nodeSnmp
+report.vmware6.MemSpinReAvg.command=--title="VMware6 mem.swapinRate.average" \
+--vertical-label="MemSpinReAvg" \
+DEF:xxx={rrd1}:MemSpinReAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemSpinReAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemSpoutAvg.name=vmware6.mem.swapout.average
+report.vmware6.MemSpoutAvg.columns=MemSpoutAvg
+report.vmware6.MemSpoutAvg.type=nodeSnmp
+report.vmware6.MemSpoutAvg.command=--title="VMware6 mem.swapout.average" \
+--vertical-label="MemSpoutAvg" \
+DEF:xxx={rrd1}:MemSpoutAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemSpoutAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemSpoutReAvg.name=vmware6.mem.swapoutRate.average
+report.vmware6.MemSpoutReAvg.columns=MemSpoutReAvg
+report.vmware6.MemSpoutReAvg.type=nodeSnmp
+report.vmware6.MemSpoutReAvg.command=--title="VMware6 mem.swapoutRate.average" \
+--vertical-label="MemSpoutReAvg" \
+DEF:xxx={rrd1}:MemSpoutReAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemSpoutReAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemSppedAvg.name=vmware6.mem.swapped.average
+report.vmware6.MemSppedAvg.columns=MemSppedAvg
+report.vmware6.MemSppedAvg.type=nodeSnmp
+report.vmware6.MemSppedAvg.command=--title="VMware6 mem.swapped.average" \
+--vertical-label="MemSppedAvg" \
+DEF:xxx={rrd1}:MemSppedAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemSppedAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemUsageAvg.name=vmware6.mem.usage.average
+report.vmware6.MemUsageAvg.columns=MemUsageAvg
+report.vmware6.MemUsageAvg.type=nodeSnmp
+report.vmware6.MemUsageAvg.command=--title="VMware6 mem.usage.average" \
+--vertical-label="MemUsageAvg" \
+DEF:xxx={rrd1}:MemUsageAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemUsageAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemVmmemctlAvg.name=vmware6.mem.vmmemctl.average
+report.vmware6.MemVmmemctlAvg.columns=MemVmmemctlAvg
+report.vmware6.MemVmmemctlAvg.type=nodeSnmp
+report.vmware6.MemVmmemctlAvg.command=--title="VMware6 mem.vmmemctl.average" \
+--vertical-label="MemVmmemctlAvg" \
+DEF:xxx={rrd1}:MemVmmemctlAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemVmmemctlAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemVmmemctlTtAvg.name=vmware6.mem.vmmemctltarget.average
+report.vmware6.MemVmmemctlTtAvg.columns=MemVmmemctlTtAvg
+report.vmware6.MemVmmemctlTtAvg.type=nodeSnmp
+report.vmware6.MemVmmemctlTtAvg.command=--title="VMware6 mem.vmmemctltarget.average" \
+--vertical-label="MemVmmemctlTtAvg" \
+DEF:xxx={rrd1}:MemVmmemctlTtAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemVmmemctlTtAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemZeroAvg.name=vmware6.mem.zero.average
+report.vmware6.MemZeroAvg.columns=MemZeroAvg
+report.vmware6.MemZeroAvg.type=nodeSnmp
+report.vmware6.MemZeroAvg.command=--title="VMware6 mem.zero.average" \
+--vertical-label="MemZeroAvg" \
+DEF:xxx={rrd1}:MemZeroAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemZeroAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemZipSavedLat.name=vmware6.mem.zipSaved.latest
+report.vmware6.MemZipSavedLat.columns=MemZipSavedLat
+report.vmware6.MemZipSavedLat.type=nodeSnmp
+report.vmware6.MemZipSavedLat.command=--title="VMware6 mem.zipSaved.latest" \
+--vertical-label="MemZipSavedLat" \
+DEF:xxx={rrd1}:MemZipSavedLat:AVERAGE \
+LINE2:xxx#0000ff:"MemZipSavedLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemZippedLat.name=vmware6.mem.zipped.latest
+report.vmware6.MemZippedLat.columns=MemZippedLat
+report.vmware6.MemZippedLat.type=nodeSnmp
+report.vmware6.MemZippedLat.command=--title="VMware6 mem.zipped.latest" \
+--vertical-label="MemZippedLat" \
+DEF:xxx={rrd1}:MemZippedLat:AVERAGE \
+LINE2:xxx#0000ff:"MemZippedLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetBroadcastRxSum.name=vmware6.net.broadcastRx.summation
+report.vmware6.NetBroadcastRxSum.columns=NetBroadcastRxSum
+report.vmware6.NetBroadcastRxSum.type=nodeSnmp
+report.vmware6.NetBroadcastRxSum.command=--title="VMware6 net.broadcastRx.summation" \
+--vertical-label="NetBroadcastRxSum" \
+DEF:xxx={rrd1}:NetBroadcastRxSum:AVERAGE \
+LINE2:xxx#0000ff:"NetBroadcastRxSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetBroadcastTxSum.name=vmware6.net.broadcastTx.summation
+report.vmware6.NetBroadcastTxSum.columns=NetBroadcastTxSum
+report.vmware6.NetBroadcastTxSum.type=nodeSnmp
+report.vmware6.NetBroadcastTxSum.command=--title="VMware6 net.broadcastTx.summation" \
+--vertical-label="NetBroadcastTxSum" \
+DEF:xxx={rrd1}:NetBroadcastTxSum:AVERAGE \
+LINE2:xxx#0000ff:"NetBroadcastTxSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetDroppedRxSum.name=vmware6.net.droppedRx.summation
+report.vmware6.NetDroppedRxSum.columns=NetDroppedRxSum
+report.vmware6.NetDroppedRxSum.type=nodeSnmp
+report.vmware6.NetDroppedRxSum.command=--title="VMware6 net.droppedRx.summation" \
+--vertical-label="NetDroppedRxSum" \
+DEF:xxx={rrd1}:NetDroppedRxSum:AVERAGE \
+LINE2:xxx#0000ff:"NetDroppedRxSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetDroppedTxSum.name=vmware6.net.droppedTx.summation
+report.vmware6.NetDroppedTxSum.columns=NetDroppedTxSum
+report.vmware6.NetDroppedTxSum.type=nodeSnmp
+report.vmware6.NetDroppedTxSum.command=--title="VMware6 net.droppedTx.summation" \
+--vertical-label="NetDroppedTxSum" \
+DEF:xxx={rrd1}:NetDroppedTxSum:AVERAGE \
+LINE2:xxx#0000ff:"NetDroppedTxSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetPacketsTxSum.name=vmware6.net.packetsTx.summation
+report.vmware6.NetPacketsTxSum.columns=NetPacketsTxSum
+report.vmware6.NetPacketsTxSum.type=nodeSnmp
+report.vmware6.NetPacketsTxSum.command=--title="VMware6 net.packetsTx.summation" \
+--vertical-label="NetPacketsTxSum" \
+DEF:xxx={rrd1}:NetPacketsTxSum:AVERAGE \
+LINE2:xxx#0000ff:"NetPacketsTxSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetReceivedAvg.name=vmware6.net.received.average
+report.vmware6.NetReceivedAvg.columns=NetReceivedAvg
+report.vmware6.NetReceivedAvg.type=nodeSnmp
+report.vmware6.NetReceivedAvg.command=--title="VMware6 net.received.average" \
+--vertical-label="NetReceivedAvg" \
+DEF:xxx={rrd1}:NetReceivedAvg:AVERAGE \
+LINE2:xxx#0000ff:"NetReceivedAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetTransmittedAvg.name=vmware6.net.transmitted.average
+report.vmware6.NetTransmittedAvg.columns=NetTransmittedAvg
+report.vmware6.NetTransmittedAvg.type=nodeSnmp
+report.vmware6.NetTransmittedAvg.command=--title="VMware6 net.transmitted.average" \
+--vertical-label="NetTransmittedAvg" \
+DEF:xxx={rrd1}:NetTransmittedAvg:AVERAGE \
+LINE2:xxx#0000ff:"NetTransmittedAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.PowerEnergySum.name=vmware6.power.energy.summation
+report.vmware6.PowerEnergySum.columns=PowerEnergySum
+report.vmware6.PowerEnergySum.type=nodeSnmp
+report.vmware6.PowerEnergySum.command=--title="VMware6 power.energy.summation" \
+--vertical-label="PowerEnergySum" \
+DEF:xxx={rrd1}:PowerEnergySum:AVERAGE \
+LINE2:xxx#0000ff:"PowerEnergySum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.PowerPowerAvg.name=vmware6.power.power.average
+report.vmware6.PowerPowerAvg.columns=PowerPowerAvg
+report.vmware6.PowerPowerAvg.type=nodeSnmp
+report.vmware6.PowerPowerAvg.command=--title="VMware6 power.power.average" \
+--vertical-label="PowerPowerAvg" \
+DEF:xxx={rrd1}:PowerPowerAvg:AVERAGE \
+LINE2:xxx#0000ff:"PowerPowerAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuActav15Lat.name=vmware6.rescpu.actav15.latest
+report.vmware6.ResCpuActav15Lat.columns=ResCpuActav15Lat
+report.vmware6.ResCpuActav15Lat.type=nodeSnmp
+report.vmware6.ResCpuActav15Lat.command=--title="VMware6 rescpu.actav15.latest" \
+--vertical-label="ResCpuActav15Lat" \
+DEF:xxx={rrd1}:ResCpuActav15Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuActav15Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuActav1Lat.name=vmware6.rescpu.actav1.latest
+report.vmware6.ResCpuActav1Lat.columns=ResCpuActav1Lat
+report.vmware6.ResCpuActav1Lat.type=nodeSnmp
+report.vmware6.ResCpuActav1Lat.command=--title="VMware6 rescpu.actav1.latest" \
+--vertical-label="ResCpuActav1Lat" \
+DEF:xxx={rrd1}:ResCpuActav1Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuActav1Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuActav5Lat.name=vmware6.rescpu.actav5.latest
+report.vmware6.ResCpuActav5Lat.columns=ResCpuActav5Lat
+report.vmware6.ResCpuActav5Lat.type=nodeSnmp
+report.vmware6.ResCpuActav5Lat.command=--title="VMware6 rescpu.actav5.latest" \
+--vertical-label="ResCpuActav5Lat" \
+DEF:xxx={rrd1}:ResCpuActav5Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuActav5Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuActpk15Lat.name=vmware6.rescpu.actpk15.latest
+report.vmware6.ResCpuActpk15Lat.columns=ResCpuActpk15Lat
+report.vmware6.ResCpuActpk15Lat.type=nodeSnmp
+report.vmware6.ResCpuActpk15Lat.command=--title="VMware6 rescpu.actpk15.latest" \
+--vertical-label="ResCpuActpk15Lat" \
+DEF:xxx={rrd1}:ResCpuActpk15Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuActpk15Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuActpk1Lat.name=vmware6.rescpu.actpk1.latest
+report.vmware6.ResCpuActpk1Lat.columns=ResCpuActpk1Lat
+report.vmware6.ResCpuActpk1Lat.type=nodeSnmp
+report.vmware6.ResCpuActpk1Lat.command=--title="VMware6 rescpu.actpk1.latest" \
+--vertical-label="ResCpuActpk1Lat" \
+DEF:xxx={rrd1}:ResCpuActpk1Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuActpk1Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuActpk5Lat.name=vmware6.rescpu.actpk5.latest
+report.vmware6.ResCpuActpk5Lat.columns=ResCpuActpk5Lat
+report.vmware6.ResCpuActpk5Lat.type=nodeSnmp
+report.vmware6.ResCpuActpk5Lat.command=--title="VMware6 rescpu.actpk5.latest" \
+--vertical-label="ResCpuActpk5Lat" \
+DEF:xxx={rrd1}:ResCpuActpk5Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuActpk5Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuMaxLd15Lat.name=vmware6.rescpu.maxLimited15.latest
+report.vmware6.ResCpuMaxLd15Lat.columns=ResCpuMaxLd15Lat
+report.vmware6.ResCpuMaxLd15Lat.type=nodeSnmp
+report.vmware6.ResCpuMaxLd15Lat.command=--title="VMware6 rescpu.maxLimited15.latest" \
+--vertical-label="ResCpuMaxLd15Lat" \
+DEF:xxx={rrd1}:ResCpuMaxLd15Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuMaxLd15Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuMaxLd1Lat.name=vmware6.rescpu.maxLimited1.latest
+report.vmware6.ResCpuMaxLd1Lat.columns=ResCpuMaxLd1Lat
+report.vmware6.ResCpuMaxLd1Lat.type=nodeSnmp
+report.vmware6.ResCpuMaxLd1Lat.command=--title="VMware6 rescpu.maxLimited1.latest" \
+--vertical-label="ResCpuMaxLd1Lat" \
+DEF:xxx={rrd1}:ResCpuMaxLd1Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuMaxLd1Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuMaxLd5Lat.name=vmware6.rescpu.maxLimited5.latest
+report.vmware6.ResCpuMaxLd5Lat.columns=ResCpuMaxLd5Lat
+report.vmware6.ResCpuMaxLd5Lat.type=nodeSnmp
+report.vmware6.ResCpuMaxLd5Lat.command=--title="VMware6 rescpu.maxLimited5.latest" \
+--vertical-label="ResCpuMaxLd5Lat" \
+DEF:xxx={rrd1}:ResCpuMaxLd5Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuMaxLd5Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuRunav15Lat.name=vmware6.rescpu.runav15.latest
+report.vmware6.ResCpuRunav15Lat.columns=ResCpuRunav15Lat
+report.vmware6.ResCpuRunav15Lat.type=nodeSnmp
+report.vmware6.ResCpuRunav15Lat.command=--title="VMware6 rescpu.runav15.latest" \
+--vertical-label="ResCpuRunav15Lat" \
+DEF:xxx={rrd1}:ResCpuRunav15Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuRunav15Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuRunav1Lat.name=vmware6.rescpu.runav1.latest
+report.vmware6.ResCpuRunav1Lat.columns=ResCpuRunav1Lat
+report.vmware6.ResCpuRunav1Lat.type=nodeSnmp
+report.vmware6.ResCpuRunav1Lat.command=--title="VMware6 rescpu.runav1.latest" \
+--vertical-label="ResCpuRunav1Lat" \
+DEF:xxx={rrd1}:ResCpuRunav1Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuRunav1Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuRunav5Lat.name=vmware6.rescpu.runav5.latest
+report.vmware6.ResCpuRunav5Lat.columns=ResCpuRunav5Lat
+report.vmware6.ResCpuRunav5Lat.type=nodeSnmp
+report.vmware6.ResCpuRunav5Lat.command=--title="VMware6 rescpu.runav5.latest" \
+--vertical-label="ResCpuRunav5Lat" \
+DEF:xxx={rrd1}:ResCpuRunav5Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuRunav5Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuRunpk15Lat.name=vmware6.rescpu.runpk15.latest
+report.vmware6.ResCpuRunpk15Lat.columns=ResCpuRunpk15Lat
+report.vmware6.ResCpuRunpk15Lat.type=nodeSnmp
+report.vmware6.ResCpuRunpk15Lat.command=--title="VMware6 rescpu.runpk15.latest" \
+--vertical-label="ResCpuRunpk15Lat" \
+DEF:xxx={rrd1}:ResCpuRunpk15Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuRunpk15Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuRunpk1Lat.name=vmware6.rescpu.runpk1.latest
+report.vmware6.ResCpuRunpk1Lat.columns=ResCpuRunpk1Lat
+report.vmware6.ResCpuRunpk1Lat.type=nodeSnmp
+report.vmware6.ResCpuRunpk1Lat.command=--title="VMware6 rescpu.runpk1.latest" \
+--vertical-label="ResCpuRunpk1Lat" \
+DEF:xxx={rrd1}:ResCpuRunpk1Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuRunpk1Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuRunpk5Lat.name=vmware6.rescpu.runpk5.latest
+report.vmware6.ResCpuRunpk5Lat.columns=ResCpuRunpk5Lat
+report.vmware6.ResCpuRunpk5Lat.type=nodeSnmp
+report.vmware6.ResCpuRunpk5Lat.command=--title="VMware6 rescpu.runpk5.latest" \
+--vertical-label="ResCpuRunpk5Lat" \
+DEF:xxx={rrd1}:ResCpuRunpk5Lat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuRunpk5Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuSeCtLat.name=vmware6.rescpu.sampleCount.latest
+report.vmware6.ResCpuSeCtLat.columns=ResCpuSeCtLat
+report.vmware6.ResCpuSeCtLat.type=nodeSnmp
+report.vmware6.ResCpuSeCtLat.command=--title="VMware6 rescpu.sampleCount.latest" \
+--vertical-label="ResCpuSeCtLat" \
+DEF:xxx={rrd1}:ResCpuSeCtLat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuSeCtLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.ResCpuSePeriodLat.name=vmware6.rescpu.samplePeriod.latest
+report.vmware6.ResCpuSePeriodLat.columns=ResCpuSePeriodLat
+report.vmware6.ResCpuSePeriodLat.type=nodeSnmp
+report.vmware6.ResCpuSePeriodLat.command=--title="VMware6 rescpu.samplePeriod.latest" \
+--vertical-label="ResCpuSePeriodLat" \
+DEF:xxx={rrd1}:ResCpuSePeriodLat:AVERAGE \
+LINE2:xxx#0000ff:"ResCpuSePeriodLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysHeartbeatLat.name=vmware6.sys.heartbeat.latest
+report.vmware6.SysHeartbeatLat.columns=SysHeartbeatLat
+report.vmware6.SysHeartbeatLat.type=nodeSnmp
+report.vmware6.SysHeartbeatLat.command=--title="VMware6 sys.heartbeat.latest" \
+--vertical-label="SysHeartbeatLat" \
+DEF:xxx={rrd1}:SysHeartbeatLat:AVERAGE \
+LINE2:xxx#0000ff:"SysHeartbeatLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysOsUpTeLat.name=vmware6.sys.osUptime.latest
+report.vmware6.SysOsUpTeLat.columns=SysOsUpTeLat
+report.vmware6.SysOsUpTeLat.type=nodeSnmp
+report.vmware6.SysOsUpTeLat.command=--title="VMware6 sys.osUptime.latest" \
+--vertical-label="SysOsUpTeLat" \
+DEF:xxx={rrd1}:SysOsUpTeLat:AVERAGE \
+LINE2:xxx#0000ff:"SysOsUpTeLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysUpTeLat.name=vmware6.sys.uptime.latest
+report.vmware6.SysUpTeLat.columns=SysUpTeLat
+report.vmware6.SysUpTeLat.type=nodeSnmp
+report.vmware6.SysUpTeLat.command=--title="VMware6 sys.uptime.latest" \
+--vertical-label="SysUpTeLat" \
+DEF:xxx={rrd1}:SysUpTeLat:AVERAGE \
+LINE2:xxx#0000ff:"SysUpTeLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
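Note: the reports up to this point use type=nodeSnmp, i.e. one value per node; from here on the definitions switch to per-instance resource types (vmware6Cpu, vmware6Net, vmware6Disk, vmware6DaSt, vmware6StAdptr, vmware6StPth), each carrying a propertiesValues key so the instance label appears in the graph title. A quick way to see that breakdown, assuming a local copy of the merged snmp-graph.properties in the working directory (sketch only):

    import re
    from collections import defaultdict

    # Group vmware6 report ids by their resource type.
    reports_by_type = defaultdict(list)
    with open('snmp-graph.properties') as f:   # hypothetical local copy
        for line in f:
            m = re.match(r'report\.vmware6\.(\w+)\.type=(\S+)', line)
            if m:
                reports_by_type[m.group(2)].append(m.group(1))

    for rrd_type, ids in sorted(reports_by_type.items()):
        print(rrd_type, len(ids), 'reports')
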
+report.vmware6.CpuCostopSum.name=vmware6.cpu.costop.summation
+report.vmware6.CpuCostopSum.columns=CpuCostopSum
+report.vmware6.CpuCostopSum.propertiesValues=vmware6CpuName
+report.vmware6.CpuCostopSum.type=vmware6Cpu
+report.vmware6.CpuCostopSum.command=--title="VMware6 cpu.costop.summation {vmware6CpuName}" \
+--vertical-label="CpuCostopSum" \
+DEF:xxx={rrd1}:CpuCostopSum:AVERAGE \
+LINE2:xxx#0000ff:"CpuCostopSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuIdleSum.name=vmware6.cpu.idle.summation
+report.vmware6.CpuIdleSum.columns=CpuIdleSum
+report.vmware6.CpuIdleSum.propertiesValues=vmware6CpuName
+report.vmware6.CpuIdleSum.type=vmware6Cpu
+report.vmware6.CpuIdleSum.command=--title="VMware6 cpu.idle.summation {vmware6CpuName}" \
+--vertical-label="CpuIdleSum" \
+DEF:xxx={rrd1}:CpuIdleSum:AVERAGE \
+LINE2:xxx#0000ff:"CpuIdleSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuMaxLdSum.name=vmware6.cpu.maxlimited.summation
+report.vmware6.CpuMaxLdSum.columns=CpuMaxLdSum
+report.vmware6.CpuMaxLdSum.propertiesValues=vmware6CpuName
+report.vmware6.CpuMaxLdSum.type=vmware6Cpu
+report.vmware6.CpuMaxLdSum.command=--title="VMware6 cpu.maxlimited.summation {vmware6CpuName}" \
+--vertical-label="CpuMaxLdSum" \
+DEF:xxx={rrd1}:CpuMaxLdSum:AVERAGE \
+LINE2:xxx#0000ff:"CpuMaxLdSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuRdySum.name=vmware6.cpu.ready.summation
+report.vmware6.CpuRdySum.columns=CpuRdySum
+report.vmware6.CpuRdySum.propertiesValues=vmware6CpuName
+report.vmware6.CpuRdySum.type=vmware6Cpu
+report.vmware6.CpuRdySum.command=--title="VMware6 cpu.ready.summation {vmware6CpuName}" \
+--vertical-label="CpuRdySum" \
+DEF:xxx={rrd1}:CpuRdySum:AVERAGE \
+LINE2:xxx#0000ff:"CpuRdySum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuRunSum.name=vmware6.cpu.run.summation
+report.vmware6.CpuRunSum.columns=CpuRunSum
+report.vmware6.CpuRunSum.propertiesValues=vmware6CpuName
+report.vmware6.CpuRunSum.type=vmware6Cpu
+report.vmware6.CpuRunSum.command=--title="VMware6 cpu.run.summation {vmware6CpuName}" \
+--vertical-label="CpuRunSum" \
+DEF:xxx={rrd1}:CpuRunSum:AVERAGE \
+LINE2:xxx#0000ff:"CpuRunSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuSystemSum.name=vmware6.cpu.system.summation
+report.vmware6.CpuSystemSum.columns=CpuSystemSum
+report.vmware6.CpuSystemSum.propertiesValues=vmware6CpuName
+report.vmware6.CpuSystemSum.type=vmware6Cpu
+report.vmware6.CpuSystemSum.command=--title="VMware6 cpu.system.summation {vmware6CpuName}" \
+--vertical-label="CpuSystemSum" \
+DEF:xxx={rrd1}:CpuSystemSum:AVERAGE \
+LINE2:xxx#0000ff:"CpuSystemSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetBytesRxAvg.name=vmware6.net.bytesRx.average
+report.vmware6.NetBytesRxAvg.columns=NetBytesRxAvg
+report.vmware6.NetBytesRxAvg.propertiesValues=vmware6NetName
+report.vmware6.NetBytesRxAvg.type=vmware6Net
+report.vmware6.NetBytesRxAvg.command=--title="VMware6 net.bytesRx.average {vmware6NetName}" \
+--vertical-label="NetBytesRxAvg" \
+DEF:xxx={rrd1}:NetBytesRxAvg:AVERAGE \
+LINE2:xxx#0000ff:"NetBytesRxAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetBytesTxAvg.name=vmware6.net.bytesTx.average
+report.vmware6.NetBytesTxAvg.columns=NetBytesTxAvg
+report.vmware6.NetBytesTxAvg.propertiesValues=vmware6NetName
+report.vmware6.NetBytesTxAvg.type=vmware6Net
+report.vmware6.NetBytesTxAvg.command=--title="VMware6 net.bytesTx.average {vmware6NetName}" \
+--vertical-label="NetBytesTxAvg" \
+DEF:xxx={rrd1}:NetBytesTxAvg:AVERAGE \
+LINE2:xxx#0000ff:"NetBytesTxAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetMulticastRxSum.name=vmware6.net.multicastRx.summation
+report.vmware6.NetMulticastRxSum.columns=NetMulticastRxSum
+report.vmware6.NetMulticastRxSum.propertiesValues=vmware6NetName
+report.vmware6.NetMulticastRxSum.type=vmware6Net
+report.vmware6.NetMulticastRxSum.command=--title="VMware6 net.multicastRx.summation {vmware6NetName}" \
+--vertical-label="NetMulticastRxSum" \
+DEF:xxx={rrd1}:NetMulticastRxSum:AVERAGE \
+LINE2:xxx#0000ff:"NetMulticastRxSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetMulticastTxSum.name=vmware6.net.multicastTx.summation
+report.vmware6.NetMulticastTxSum.columns=NetMulticastTxSum
+report.vmware6.NetMulticastTxSum.propertiesValues=vmware6NetName
+report.vmware6.NetMulticastTxSum.type=vmware6Net
+report.vmware6.NetMulticastTxSum.command=--title="VMware6 net.multicastTx.summation {vmware6NetName}" \
+--vertical-label="NetMulticastTxSum" \
+DEF:xxx={rrd1}:NetMulticastTxSum:AVERAGE \
+LINE2:xxx#0000ff:"NetMulticastTxSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetPacketsRxSum.name=vmware6.net.packetsRx.summation
+report.vmware6.NetPacketsRxSum.columns=NetPacketsRxSum
+report.vmware6.NetPacketsRxSum.propertiesValues=vmware6NetName
+report.vmware6.NetPacketsRxSum.type=vmware6Net
+report.vmware6.NetPacketsRxSum.command=--title="VMware6 net.packetsRx.summation {vmware6NetName}" \
+--vertical-label="NetPacketsRxSum" \
+DEF:xxx={rrd1}:NetPacketsRxSum:AVERAGE \
+LINE2:xxx#0000ff:"NetPacketsRxSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetUsageAvg.name=vmware6.net.usage.average
+report.vmware6.NetUsageAvg.columns=NetUsageAvg
+report.vmware6.NetUsageAvg.propertiesValues=vmware6NetName
+report.vmware6.NetUsageAvg.type=vmware6Net
+report.vmware6.NetUsageAvg.command=--title="VMware6 net.usage.average {vmware6NetName}" \
+--vertical-label="NetUsageAvg" \
+DEF:xxx={rrd1}:NetUsageAvg:AVERAGE \
+LINE2:xxx#0000ff:"NetUsageAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskBusResetsSum.name=vmware6.disk.busResets.summation
+report.vmware6.DiskBusResetsSum.columns=DiskBusResetsSum
+report.vmware6.DiskBusResetsSum.propertiesValues=vmware6DiskName
+report.vmware6.DiskBusResetsSum.type=vmware6Disk
+report.vmware6.DiskBusResetsSum.command=--title="VMware6 disk.busResets.summation {vmware6DiskName}" \
+--vertical-label="DiskBusResetsSum" \
+DEF:xxx={rrd1}:DiskBusResetsSum:AVERAGE \
+LINE2:xxx#0000ff:"DiskBusResetsSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskCsAdAvg.name=vmware6.disk.commandsAveraged.average
+report.vmware6.DiskCsAdAvg.columns=DiskCsAdAvg
+report.vmware6.DiskCsAdAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskCsAdAvg.type=vmware6Disk
+report.vmware6.DiskCsAdAvg.command=--title="VMware6 disk.commandsAveraged.average {vmware6DiskName}" \
+--vertical-label="DiskCsAdAvg" \
+DEF:xxx={rrd1}:DiskCsAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskCsAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskCsAdSum.name=vmware6.disk.commandsAborted.summation
+report.vmware6.DiskCsAdSum.columns=DiskCsAdSum
+report.vmware6.DiskCsAdSum.propertiesValues=vmware6DiskName
+report.vmware6.DiskCsAdSum.type=vmware6Disk
+report.vmware6.DiskCsAdSum.command=--title="VMware6 disk.commandsAborted.summation {vmware6DiskName}" \
+--vertical-label="DiskCsAdSum" \
+DEF:xxx={rrd1}:DiskCsAdSum:AVERAGE \
+LINE2:xxx#0000ff:"DiskCsAdSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskCsSum.name=vmware6.disk.commands.summation
+report.vmware6.DiskCsSum.columns=DiskCsSum
+report.vmware6.DiskCsSum.propertiesValues=vmware6DiskName
+report.vmware6.DiskCsSum.type=vmware6Disk
+report.vmware6.DiskCsSum.command=--title="VMware6 disk.commands.summation {vmware6DiskName}" \
+--vertical-label="DiskCsSum" \
+DEF:xxx={rrd1}:DiskCsSum:AVERAGE \
+LINE2:xxx#0000ff:"DiskCsSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskNrRdAdAvg.name=vmware6.disk.numberReadAveraged.average
+report.vmware6.DiskNrRdAdAvg.columns=DiskNrRdAdAvg
+report.vmware6.DiskNrRdAdAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskNrRdAdAvg.type=vmware6Disk
+report.vmware6.DiskNrRdAdAvg.command=--title="VMware6 disk.numberReadAveraged.average {vmware6DiskName}" \
+--vertical-label="DiskNrRdAdAvg" \
+DEF:xxx={rrd1}:DiskNrRdAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskNrRdAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskNrRdSum.name=vmware6.disk.numberRead.summation
+report.vmware6.DiskNrRdSum.columns=DiskNrRdSum
+report.vmware6.DiskNrRdSum.propertiesValues=vmware6DiskName
+report.vmware6.DiskNrRdSum.type=vmware6Disk
+report.vmware6.DiskNrRdSum.command=--title="VMware6 disk.numberRead.summation {vmware6DiskName}" \
+--vertical-label="DiskNrRdSum" \
+DEF:xxx={rrd1}:DiskNrRdSum:AVERAGE \
+LINE2:xxx#0000ff:"DiskNrRdSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskNrWeAdAvg.name=vmware6.disk.numberWriteAveraged.average
+report.vmware6.DiskNrWeAdAvg.columns=DiskNrWeAdAvg
+report.vmware6.DiskNrWeAdAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskNrWeAdAvg.type=vmware6Disk
+report.vmware6.DiskNrWeAdAvg.command=--title="VMware6 disk.numberWriteAveraged.average {vmware6DiskName}" \
+--vertical-label="DiskNrWeAdAvg" \
+DEF:xxx={rrd1}:DiskNrWeAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskNrWeAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskNrWeSum.name=vmware6.disk.numberWrite.summation
+report.vmware6.DiskNrWeSum.columns=DiskNrWeSum
+report.vmware6.DiskNrWeSum.propertiesValues=vmware6DiskName
+report.vmware6.DiskNrWeSum.type=vmware6Disk
+report.vmware6.DiskNrWeSum.command=--title="VMware6 disk.numberWrite.summation {vmware6DiskName}" \
+--vertical-label="DiskNrWeSum" \
+DEF:xxx={rrd1}:DiskNrWeSum:AVERAGE \
+LINE2:xxx#0000ff:"DiskNrWeSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStNrRdAdAvg.name=vmware6.datastore.numberReadAveraged.average
+report.vmware6.DaStNrRdAdAvg.columns=DaStNrRdAdAvg
+report.vmware6.DaStNrRdAdAvg.propertiesValues=vmware6DaStName
+report.vmware6.DaStNrRdAdAvg.type=vmware6DaSt
+report.vmware6.DaStNrRdAdAvg.command=--title="VMware6 datastore.numberReadAveraged.average {vmware6DaStName}" \
+--vertical-label="DaStNrRdAdAvg" \
+DEF:xxx={rrd1}:DaStNrRdAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"DaStNrRdAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStNrWeAdAvg.name=vmware6.datastore.numberWriteAveraged.average
+report.vmware6.DaStNrWeAdAvg.columns=DaStNrWeAdAvg
+report.vmware6.DaStNrWeAdAvg.propertiesValues=vmware6DaStName
+report.vmware6.DaStNrWeAdAvg.type=vmware6DaSt
+report.vmware6.DaStNrWeAdAvg.command=--title="VMware6 datastore.numberWriteAveraged.average {vmware6DaStName}" \
+--vertical-label="DaStNrWeAdAvg" \
+DEF:xxx={rrd1}:DaStNrWeAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"DaStNrWeAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStRdAvg.name=vmware6.datastore.read.average
+report.vmware6.DaStRdAvg.columns=DaStRdAvg
+report.vmware6.DaStRdAvg.propertiesValues=vmware6DaStName
+report.vmware6.DaStRdAvg.type=vmware6DaSt
+report.vmware6.DaStRdAvg.command=--title="VMware6 datastore.read.average {vmware6DaStName}" \
+--vertical-label="DaStRdAvg" \
+DEF:xxx={rrd1}:DaStRdAvg:AVERAGE \
+LINE2:xxx#0000ff:"DaStRdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStTlRdLyAvg.name=vmware6.datastore.totalReadLatency.average
+report.vmware6.DaStTlRdLyAvg.columns=DaStTlRdLyAvg
+report.vmware6.DaStTlRdLyAvg.propertiesValues=vmware6DaStName
+report.vmware6.DaStTlRdLyAvg.type=vmware6DaSt
+report.vmware6.DaStTlRdLyAvg.command=--title="VMware6 datastore.totalReadLatency.average {vmware6DaStName}" \
+--vertical-label="DaStTlRdLyAvg" \
+DEF:xxx={rrd1}:DaStTlRdLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DaStTlRdLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStTlWeLyAvg.name=vmware6.datastore.totalWriteLatency.average
+report.vmware6.DaStTlWeLyAvg.columns=DaStTlWeLyAvg
+report.vmware6.DaStTlWeLyAvg.propertiesValues=vmware6DaStName
+report.vmware6.DaStTlWeLyAvg.type=vmware6DaSt
+report.vmware6.DaStTlWeLyAvg.command=--title="VMware6 datastore.totalWriteLatency.average {vmware6DaStName}" \
+--vertical-label="DaStTlWeLyAvg" \
+DEF:xxx={rrd1}:DaStTlWeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DaStTlWeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStWeAvg.name=vmware6.datastore.write.average
+report.vmware6.DaStWeAvg.columns=DaStWeAvg
+report.vmware6.DaStWeAvg.propertiesValues=vmware6DaStName
+report.vmware6.DaStWeAvg.type=vmware6DaSt
+report.vmware6.DaStWeAvg.command=--title="VMware6 datastore.write.average {vmware6DaStName}" \
+--vertical-label="DaStWeAvg" \
+DEF:xxx={rrd1}:DaStWeAvg:AVERAGE \
+LINE2:xxx#0000ff:"DaStWeAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StAdptrCsAdAvg.name=vmware6.storageAdapter.commandsAveraged.average
+report.vmware6.StAdptrCsAdAvg.columns=StAdptrCsAdAvg
+report.vmware6.StAdptrCsAdAvg.propertiesValues=vmware6StAdptrName
+report.vmware6.StAdptrCsAdAvg.type=vmware6StAdptr
+report.vmware6.StAdptrCsAdAvg.command=--title="VMware6 storageAdapter.commandsAveraged.average {vmware6StAdptrName}" \
+--vertical-label="StAdptrCsAdAvg" \
+DEF:xxx={rrd1}:StAdptrCsAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"StAdptrCsAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StAdptrNrRdAdAvg.name=vmware6.storageAdapter.numberReadAveraged.average
+report.vmware6.StAdptrNrRdAdAvg.columns=StAdptrNrRdAdAvg
+report.vmware6.StAdptrNrRdAdAvg.propertiesValues=vmware6StAdptrName
+report.vmware6.StAdptrNrRdAdAvg.type=vmware6StAdptr
+report.vmware6.StAdptrNrRdAdAvg.command=--title="VMware6 storageAdapter.numberReadAveraged.average {vmware6StAdptrName}" \
+--vertical-label="StAdptrNrRdAdAvg" \
+DEF:xxx={rrd1}:StAdptrNrRdAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"StAdptrNrRdAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StAdptrNrWeAdAvg.name=vmware6.storageAdapter.numberWriteAveraged.average
+report.vmware6.StAdptrNrWeAdAvg.columns=StAdptrNrWeAdAvg
+report.vmware6.StAdptrNrWeAdAvg.propertiesValues=vmware6StAdptrName
+report.vmware6.StAdptrNrWeAdAvg.type=vmware6StAdptr
+report.vmware6.StAdptrNrWeAdAvg.command=--title="VMware6 storageAdapter.numberWriteAveraged.average {vmware6StAdptrName}" \
+--vertical-label="StAdptrNrWeAdAvg" \
+DEF:xxx={rrd1}:StAdptrNrWeAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"StAdptrNrWeAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StAdptrRdAvg.name=vmware6.storageAdapter.read.average
+report.vmware6.StAdptrRdAvg.columns=StAdptrRdAvg
+report.vmware6.StAdptrRdAvg.propertiesValues=vmware6StAdptrName
+report.vmware6.StAdptrRdAvg.type=vmware6StAdptr
+report.vmware6.StAdptrRdAvg.command=--title="VMware6 storageAdapter.read.average {vmware6StAdptrName}" \
+--vertical-label="StAdptrRdAvg" \
+DEF:xxx={rrd1}:StAdptrRdAvg:AVERAGE \
+LINE2:xxx#0000ff:"StAdptrRdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StAdptrTlRdLyAvg.name=vmware6.storageAdapter.totalReadLatency.average
+report.vmware6.StAdptrTlRdLyAvg.columns=StAdptrTlRdLyAvg
+report.vmware6.StAdptrTlRdLyAvg.propertiesValues=vmware6StAdptrName
+report.vmware6.StAdptrTlRdLyAvg.type=vmware6StAdptr
+report.vmware6.StAdptrTlRdLyAvg.command=--title="VMware6 storageAdapter.totalReadLatency.average {vmware6StAdptrName}" \
+--vertical-label="StAdptrTlRdLyAvg" \
+DEF:xxx={rrd1}:StAdptrTlRdLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"StAdptrTlRdLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StAdptrTlWeLyAvg.name=vmware6.storageAdapter.totalWriteLatency.average
+report.vmware6.StAdptrTlWeLyAvg.columns=StAdptrTlWeLyAvg
+report.vmware6.StAdptrTlWeLyAvg.propertiesValues=vmware6StAdptrName
+report.vmware6.StAdptrTlWeLyAvg.type=vmware6StAdptr
+report.vmware6.StAdptrTlWeLyAvg.command=--title="VMware6 storageAdapter.totalWriteLatency.average {vmware6StAdptrName}" \
+--vertical-label="StAdptrTlWeLyAvg" \
+DEF:xxx={rrd1}:StAdptrTlWeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"StAdptrTlWeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StAdptrWeAvg.name=vmware6.storageAdapter.write.average
+report.vmware6.StAdptrWeAvg.columns=StAdptrWeAvg
+report.vmware6.StAdptrWeAvg.propertiesValues=vmware6StAdptrName
+report.vmware6.StAdptrWeAvg.type=vmware6StAdptr
+report.vmware6.StAdptrWeAvg.command=--title="VMware6 storageAdapter.write.average {vmware6StAdptrName}" \
+--vertical-label="StAdptrWeAvg" \
+DEF:xxx={rrd1}:StAdptrWeAvg:AVERAGE \
+LINE2:xxx#0000ff:"StAdptrWeAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StPthCsAdAvg.name=vmware6.storagePath.commandsAveraged.average
+report.vmware6.StPthCsAdAvg.columns=StPthCsAdAvg
+report.vmware6.StPthCsAdAvg.propertiesValues=vmware6StPthName
+report.vmware6.StPthCsAdAvg.type=vmware6StPth
+report.vmware6.StPthCsAdAvg.command=--title="VMware6 storagePath.commandsAveraged.average {vmware6StPthName}" \
+--vertical-label="StPthCsAdAvg" \
+DEF:xxx={rrd1}:StPthCsAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"StPthCsAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StPthNrRdAdAvg.name=vmware6.storagePath.numberReadAveraged.average
+report.vmware6.StPthNrRdAdAvg.columns=StPthNrRdAdAvg
+report.vmware6.StPthNrRdAdAvg.propertiesValues=vmware6StPthName
+report.vmware6.StPthNrRdAdAvg.type=vmware6StPth
+report.vmware6.StPthNrRdAdAvg.command=--title="VMware6 storagePath.numberReadAveraged.average {vmware6StPthName}" \
+--vertical-label="StPthNrRdAdAvg" \
+DEF:xxx={rrd1}:StPthNrRdAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"StPthNrRdAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StPthNrWeAdAvg.name=vmware6.storagePath.numberWriteAveraged.average
+report.vmware6.StPthNrWeAdAvg.columns=StPthNrWeAdAvg
+report.vmware6.StPthNrWeAdAvg.propertiesValues=vmware6StPthName
+report.vmware6.StPthNrWeAdAvg.type=vmware6StPth
+report.vmware6.StPthNrWeAdAvg.command=--title="VMware6 storagePath.numberWriteAveraged.average {vmware6StPthName}" \
+--vertical-label="StPthNrWeAdAvg" \
+DEF:xxx={rrd1}:StPthNrWeAdAvg:AVERAGE \
+LINE2:xxx#0000ff:"StPthNrWeAdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StPthRdAvg.name=vmware6.storagePath.read.average
+report.vmware6.StPthRdAvg.columns=StPthRdAvg
+report.vmware6.StPthRdAvg.propertiesValues=vmware6StPthName
+report.vmware6.StPthRdAvg.type=vmware6StPth
+report.vmware6.StPthRdAvg.command=--title="VMware6 storagePath.read.average {vmware6StPthName}" \
+--vertical-label="StPthRdAvg" \
+DEF:xxx={rrd1}:StPthRdAvg:AVERAGE \
+LINE2:xxx#0000ff:"StPthRdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StPthTlRdLyAvg.name=vmware6.storagePath.totalReadLatency.average
+report.vmware6.StPthTlRdLyAvg.columns=StPthTlRdLyAvg
+report.vmware6.StPthTlRdLyAvg.propertiesValues=vmware6StPthName
+report.vmware6.StPthTlRdLyAvg.type=vmware6StPth
+report.vmware6.StPthTlRdLyAvg.command=--title="VMware6 storagePath.totalReadLatency.average {vmware6StPthName}" \
+--vertical-label="StPthTlRdLyAvg" \
+DEF:xxx={rrd1}:StPthTlRdLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"StPthTlRdLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StPthTlWeLyAvg.name=vmware6.storagePath.totalWriteLatency.average
+report.vmware6.StPthTlWeLyAvg.columns=StPthTlWeLyAvg
+report.vmware6.StPthTlWeLyAvg.propertiesValues=vmware6StPthName
+report.vmware6.StPthTlWeLyAvg.type=vmware6StPth
+report.vmware6.StPthTlWeLyAvg.command=--title="VMware6 storagePath.totalWriteLatency.average {vmware6StPthName}" \
+--vertical-label="StPthTlWeLyAvg" \
+DEF:xxx={rrd1}:StPthTlWeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"StPthTlWeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StPthWeAvg.name=vmware6.storagePath.write.average
+report.vmware6.StPthWeAvg.columns=StPthWeAvg
+report.vmware6.StPthWeAvg.propertiesValues=vmware6StPthName
+report.vmware6.StPthWeAvg.type=vmware6StPth
+report.vmware6.StPthWeAvg.command=--title="VMware6 storagePath.write.average {vmware6StPthName}" \
+--vertical-label="StPthWeAvg" \
+DEF:xxx={rrd1}:StPthWeAvg:AVERAGE \
+LINE2:xxx#0000ff:"StPthWeAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuRdCyAvg.name=vmware6.cpu.reservedCapacity.average
+report.vmware6.CpuRdCyAvg.columns=CpuRdCyAvg
+report.vmware6.CpuRdCyAvg.type=nodeSnmp
+report.vmware6.CpuRdCyAvg.command=--title="VMware6 cpu.reservedCapacity.average" \
+--vertical-label="CpuRdCyAvg" \
+DEF:xxx={rrd1}:CpuRdCyAvg:AVERAGE \
+LINE2:xxx#0000ff:"CpuRdCyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuTlCyAvg.name=vmware6.cpu.totalCapacity.average
+report.vmware6.CpuTlCyAvg.columns=CpuTlCyAvg
+report.vmware6.CpuTlCyAvg.type=nodeSnmp
+report.vmware6.CpuTlCyAvg.command=--title="VMware6 cpu.totalCapacity.average" \
+--vertical-label="CpuTlCyAvg" \
+DEF:xxx={rrd1}:CpuTlCyAvg:AVERAGE \
+LINE2:xxx#0000ff:"CpuTlCyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.HbrHbrNetRxAvg.name=vmware6.hbr.hbrNetRx.average
+report.vmware6.HbrHbrNetRxAvg.columns=HbrHbrNetRxAvg
+report.vmware6.HbrHbrNetRxAvg.type=nodeSnmp
+report.vmware6.HbrHbrNetRxAvg.command=--title="VMware6 hbr.hbrNetRx.average" \
+--vertical-label="HbrHbrNetRxAvg" \
+DEF:xxx={rrd1}:HbrHbrNetRxAvg:AVERAGE \
+LINE2:xxx#0000ff:"HbrHbrNetRxAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.HbrHbrNetTxAvg.name=vmware6.hbr.hbrNetTx.average
+report.vmware6.HbrHbrNetTxAvg.columns=HbrHbrNetTxAvg
+report.vmware6.HbrHbrNetTxAvg.type=nodeSnmp
+report.vmware6.HbrHbrNetTxAvg.command=--title="VMware6 hbr.hbrNetTx.average" \
+--vertical-label="HbrHbrNetTxAvg" \
+DEF:xxx={rrd1}:HbrHbrNetTxAvg:AVERAGE \
+LINE2:xxx#0000ff:"HbrHbrNetTxAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.HbrHbrNumVmsAvg.name=vmware6.hbr.hbrNumVms.average
+report.vmware6.HbrHbrNumVmsAvg.columns=HbrHbrNumVmsAvg
+report.vmware6.HbrHbrNumVmsAvg.type=nodeSnmp
+report.vmware6.HbrHbrNumVmsAvg.command=--title="VMware6 hbr.hbrNumVms.average" \
+--vertical-label="HbrHbrNumVmsAvg" \
+DEF:xxx={rrd1}:HbrHbrNumVmsAvg:AVERAGE \
+LINE2:xxx#0000ff:"HbrHbrNumVmsAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemHeapAvg.name=vmware6.mem.heap.average
+report.vmware6.MemHeapAvg.columns=MemHeapAvg
+report.vmware6.MemHeapAvg.type=nodeSnmp
+report.vmware6.MemHeapAvg.command=--title="VMware6 mem.heap.average" \
+--vertical-label="MemHeapAvg" \
+DEF:xxx={rrd1}:MemHeapAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemHeapAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemHeapfreeAvg.name=vmware6.mem.heapfree.average
+report.vmware6.MemHeapfreeAvg.columns=MemHeapfreeAvg
+report.vmware6.MemHeapfreeAvg.type=nodeSnmp
+report.vmware6.MemHeapfreeAvg.command=--title="VMware6 mem.heapfree.average" \
+--vertical-label="MemHeapfreeAvg" \
+DEF:xxx={rrd1}:MemHeapfreeAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemHeapfreeAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemLlSpInAvg.name=vmware6.mem.llSwapIn.average
+report.vmware6.MemLlSpInAvg.columns=MemLlSpInAvg
+report.vmware6.MemLlSpInAvg.type=nodeSnmp
+report.vmware6.MemLlSpInAvg.command=--title="VMware6 mem.llSwapIn.average" \
+--vertical-label="MemLlSpInAvg" \
+DEF:xxx={rrd1}:MemLlSpInAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemLlSpInAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemLlSpOutAvg.name=vmware6.mem.llSwapOut.average
+report.vmware6.MemLlSpOutAvg.columns=MemLlSpOutAvg
+report.vmware6.MemLlSpOutAvg.type=nodeSnmp
+report.vmware6.MemLlSpOutAvg.command=--title="VMware6 mem.llSwapOut.average" \
+--vertical-label="MemLlSpOutAvg" \
+DEF:xxx={rrd1}:MemLlSpOutAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemLlSpOutAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemLowfreeTdAvg.name=vmware6.mem.lowfreethreshold.average
+report.vmware6.MemLowfreeTdAvg.columns=MemLowfreeTdAvg
+report.vmware6.MemLowfreeTdAvg.type=nodeSnmp
+report.vmware6.MemLowfreeTdAvg.command=--title="VMware6 mem.lowfreethreshold.average" \
+--vertical-label="MemLowfreeTdAvg" \
+DEF:xxx={rrd1}:MemLowfreeTdAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemLowfreeTdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemRdCyAvg.name=vmware6.mem.reservedCapacity.average
+report.vmware6.MemRdCyAvg.columns=MemRdCyAvg
+report.vmware6.MemRdCyAvg.type=nodeSnmp
+report.vmware6.MemRdCyAvg.command=--title="VMware6 mem.reservedCapacity.average" \
+--vertical-label="MemRdCyAvg" \
+DEF:xxx={rrd1}:MemRdCyAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemRdCyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemSharedcommonAvg.name=vmware6.mem.sharedcommon.average
+report.vmware6.MemSharedcommonAvg.columns=MemSharedcommonAvg
+report.vmware6.MemSharedcommonAvg.type=nodeSnmp
+report.vmware6.MemSharedcommonAvg.command=--title="VMware6 mem.sharedcommon.average" \
+--vertical-label="MemSharedcommonAvg" \
+DEF:xxx={rrd1}:MemSharedcommonAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemSharedcommonAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemSpusedAvg.name=vmware6.mem.swapused.average
+report.vmware6.MemSpusedAvg.columns=MemSpusedAvg
+report.vmware6.MemSpusedAvg.type=nodeSnmp
+report.vmware6.MemSpusedAvg.command=--title="VMware6 mem.swapused.average" \
+--vertical-label="MemSpusedAvg" \
+DEF:xxx={rrd1}:MemSpusedAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemSpusedAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemStateLat.name=vmware6.mem.state.latest
+report.vmware6.MemStateLat.columns=MemStateLat
+report.vmware6.MemStateLat.type=nodeSnmp
+report.vmware6.MemStateLat.command=--title="VMware6 mem.state.latest" \
+--vertical-label="MemStateLat" \
+DEF:xxx={rrd1}:MemStateLat:AVERAGE \
+LINE2:xxx#0000ff:"MemStateLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemSysUsageAvg.name=vmware6.mem.sysUsage.average
+report.vmware6.MemSysUsageAvg.columns=MemSysUsageAvg
+report.vmware6.MemSysUsageAvg.type=nodeSnmp
+report.vmware6.MemSysUsageAvg.command=--title="VMware6 mem.sysUsage.average" \
+--vertical-label="MemSysUsageAvg" \
+DEF:xxx={rrd1}:MemSysUsageAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemSysUsageAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemTlCyAvg.name=vmware6.mem.totalCapacity.average
+report.vmware6.MemTlCyAvg.columns=MemTlCyAvg
+report.vmware6.MemTlCyAvg.type=nodeSnmp
+report.vmware6.MemTlCyAvg.command=--title="VMware6 mem.totalCapacity.average" \
+--vertical-label="MemTlCyAvg" \
+DEF:xxx={rrd1}:MemTlCyAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemTlCyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemUdAvg.name=vmware6.mem.unreserved.average
+report.vmware6.MemUdAvg.columns=MemUdAvg
+report.vmware6.MemUdAvg.type=nodeSnmp
+report.vmware6.MemUdAvg.command=--title="VMware6 mem.unreserved.average" \
+--vertical-label="MemUdAvg" \
+DEF:xxx={rrd1}:MemUdAvg:AVERAGE \
+LINE2:xxx#0000ff:"MemUdAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemVmfs.pbc.OdLat.name=vmware6.mem.vmfs.pbc.overhead.latest
+report.vmware6.MemVmfs.pbc.OdLat.columns=MemVmfsPbcOdLat
+report.vmware6.MemVmfs.pbc.OdLat.type=nodeSnmp
+report.vmware6.MemVmfs.pbc.OdLat.command=--title="VMware6 mem.vmfs.pbc.overhead.latest" \
+--vertical-label="MemVmfs.pbc.OdLat" \
+DEF:xxx={rrd1}:MemVmfsPbcOdLat:AVERAGE \
+LINE2:xxx#0000ff:"MemVmfs.pbc.OdLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemVmfsPbcCpMsRtiLt.name=vmware6.mem.vmfs.pbc.capMissRatio.latest
+report.vmware6.MemVmfsPbcCpMsRtiLt.columns=MemVmfsPbcCpMsRtiLt
+report.vmware6.MemVmfsPbcCpMsRtiLt.type=nodeSnmp
+report.vmware6.MemVmfsPbcCpMsRtiLt.command=--title="VMware6 mem.vmfs.pbc.capMissRatio.latest" \
+--vertical-label="MemVmfsPbcCpMsRtiLt" \
+DEF:xxx={rrd1}:MemVmfsPbcCpMsRtiLt:AVERAGE \
+LINE2:xxx#0000ff:"MemVmfsPbcCpMsRtiLt" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemVmfs.pbc.sizeLat.name=vmware6.mem.vmfs.pbc.size.latest
+report.vmware6.MemVmfs.pbc.sizeLat.columns=MemVmfsPbcSizeLat
+report.vmware6.MemVmfs.pbc.sizeLat.type=nodeSnmp
+report.vmware6.MemVmfs.pbc.sizeLat.command=--title="VMware6 mem.vmfs.pbc.size.latest" \
+--vertical-label="MemVmfs.pbc.sizeLat" \
+DEF:xxx={rrd1}:MemVmfsPbcSizeLat:AVERAGE \
+LINE2:xxx#0000ff:"MemVmfs.pbc.sizeLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemVmfsPbcSizMaxLat.name=vmware6.mem.vmfs.pbc.sizeMax.latest
+report.vmware6.MemVmfsPbcSizMaxLat.columns=MemVmfsPbcSizMaxLat
+report.vmware6.MemVmfsPbcSizMaxLat.type=nodeSnmp
+report.vmware6.MemVmfsPbcSizMaxLat.command=--title="VMware6 mem.vmfs.pbc.sizeMax.latest" \
+--vertical-label="MemVmfsPbcSizMaxLat" \
+DEF:xxx={rrd1}:MemVmfsPbcSizMaxLat:AVERAGE \
+LINE2:xxx#0000ff:"MemVmfsPbcSizMaxLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemVmfsPbcWrkSetLat.name=vmware6.mem.vmfs.pbc.workingSet.latest
+report.vmware6.MemVmfsPbcWrkSetLat.columns=MemVmfsPbcWrkSetLat
+report.vmware6.MemVmfsPbcWrkSetLat.type=nodeSnmp
+report.vmware6.MemVmfsPbcWrkSetLat.command=--title="VMware6 mem.vmfs.pbc.workingSet.latest" \
+--vertical-label="MemVmfsPbcWrkSetLat" \
+DEF:xxx={rrd1}:MemVmfsPbcWrkSetLat:AVERAGE \
+LINE2:xxx#0000ff:"MemVmfsPbcWrkSetLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.MemVmfsPbcWrkStMxLt.name=vmware6.mem.vmfs.pbc.workingSetMax.latest
+report.vmware6.MemVmfsPbcWrkStMxLt.columns=MemVmfsPbcWrkStMxLt
+report.vmware6.MemVmfsPbcWrkStMxLt.type=nodeSnmp
+report.vmware6.MemVmfsPbcWrkStMxLt.command=--title="VMware6 mem.vmfs.pbc.workingSetMax.latest" \
+--vertical-label="MemVmfsPbcWrkStMxLt" \
+DEF:xxx={rrd1}:MemVmfsPbcWrkStMxLt:AVERAGE \
+LINE2:xxx#0000ff:"MemVmfsPbcWrkStMxLt" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.PowerPowerCapAvg.name=vmware6.power.powerCap.average
+report.vmware6.PowerPowerCapAvg.columns=PowerPowerCapAvg
+report.vmware6.PowerPowerCapAvg.type=nodeSnmp
+report.vmware6.PowerPowerCapAvg.command=--title="VMware6 power.powerCap.average" \
+--vertical-label="PowerPowerCapAvg" \
+DEF:xxx={rrd1}:PowerPowerCapAvg:AVERAGE \
+LINE2:xxx#0000ff:"PowerPowerCapAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StAdptrMaxTlLyLat.name=vmware6.storageAdapter.maxTotalLatency.latest
+report.vmware6.StAdptrMaxTlLyLat.columns=StAdptrMaxTlLyLat
+report.vmware6.StAdptrMaxTlLyLat.type=nodeSnmp
+report.vmware6.StAdptrMaxTlLyLat.command=--title="VMware6 storageAdapter.maxTotalLatency.latest" \
+--vertical-label="StAdptrMaxTlLyLat" \
+DEF:xxx={rrd1}:StAdptrMaxTlLyLat:AVERAGE \
+LINE2:xxx#0000ff:"StAdptrMaxTlLyLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.StPthMaxTlLyLat.name=vmware6.storagePath.maxTotalLatency.latest
+report.vmware6.StPthMaxTlLyLat.columns=StPthMaxTlLyLat
+report.vmware6.StPthMaxTlLyLat.type=nodeSnmp
+report.vmware6.StPthMaxTlLyLat.command=--title="VMware6 storagePath.maxTotalLatency.latest" \
+--vertical-label="StPthMaxTlLyLat" \
+DEF:xxx={rrd1}:StPthMaxTlLyLat:AVERAGE \
+LINE2:xxx#0000ff:"StPthMaxTlLyLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuCoreUnAvg.name=vmware6.cpu.coreUtilization.average
+report.vmware6.CpuCoreUnAvg.columns=CpuCoreUnAvg
+report.vmware6.CpuCoreUnAvg.propertiesValues=vmware6CpuName
+report.vmware6.CpuCoreUnAvg.type=vmware6Cpu
+report.vmware6.CpuCoreUnAvg.command=--title="VMware6 cpu.coreUtilization.average {vmware6CpuName}" \
+--vertical-label="CpuCoreUnAvg" \
+DEF:xxx={rrd1}:CpuCoreUnAvg:AVERAGE \
+LINE2:xxx#0000ff:"CpuCoreUnAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.CpuUnAvg.name=vmware6.cpu.utilization.average
+report.vmware6.CpuUnAvg.columns=CpuUnAvg
+report.vmware6.CpuUnAvg.propertiesValues=vmware6CpuName
+report.vmware6.CpuUnAvg.type=vmware6Cpu
+report.vmware6.CpuUnAvg.command=--title="VMware6 cpu.utilization.average {vmware6CpuName}" \
+--vertical-label="CpuUnAvg" \
+DEF:xxx={rrd1}:CpuUnAvg:AVERAGE \
+LINE2:xxx#0000ff:"CpuUnAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReCpuAcMinLat.name=vmware6.sys.resourceCpuAllocMin.latest
+report.vmware6.SysReCpuAcMinLat.columns=SysReCpuAcMinLat
+report.vmware6.SysReCpuAcMinLat.propertiesValues=vmware6SysName
+report.vmware6.SysReCpuAcMinLat.type=vmware6Sys
+report.vmware6.SysReCpuAcMinLat.command=--title="VMware6 sys.resourceCpuAllocMin.latest {vmware6SysName}" \
+--vertical-label="SysReCpuAcMinLat" \
+DEF:xxx={rrd1}:SysReCpuAcMinLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReCpuAcMinLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReCpuAcSsLat.name=vmware6.sys.resourceCpuAllocShares.latest
+report.vmware6.SysReCpuAcSsLat.columns=SysReCpuAcSsLat
+report.vmware6.SysReCpuAcSsLat.propertiesValues=vmware6SysName
+report.vmware6.SysReCpuAcSsLat.type=vmware6Sys
+report.vmware6.SysReCpuAcSsLat.command=--title="VMware6 sys.resourceCpuAllocShares.latest {vmware6SysName}" \
+--vertical-label="SysReCpuAcSsLat" \
+DEF:xxx={rrd1}:SysReCpuAcSsLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReCpuAcSsLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReCpuAct1Lat.name=vmware6.sys.resourceCpuAct1.latest
+report.vmware6.SysReCpuAct1Lat.columns=SysReCpuAct1Lat
+report.vmware6.SysReCpuAct1Lat.propertiesValues=vmware6SysName
+report.vmware6.SysReCpuAct1Lat.type=vmware6Sys
+report.vmware6.SysReCpuAct1Lat.command=--title="VMware6 sys.resourceCpuAct1.latest {vmware6SysName}" \
+--vertical-label="SysReCpuAct1Lat" \
+DEF:xxx={rrd1}:SysReCpuAct1Lat:AVERAGE \
+LINE2:xxx#0000ff:"SysReCpuAct1Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReCpuAct5Lat.name=vmware6.sys.resourceCpuAct5.latest
+report.vmware6.SysReCpuAct5Lat.columns=SysReCpuAct5Lat
+report.vmware6.SysReCpuAct5Lat.propertiesValues=vmware6SysName
+report.vmware6.SysReCpuAct5Lat.type=vmware6Sys
+report.vmware6.SysReCpuAct5Lat.command=--title="VMware6 sys.resourceCpuAct5.latest {vmware6SysName}" \
+--vertical-label="SysReCpuAct5Lat" \
+DEF:xxx={rrd1}:SysReCpuAct5Lat:AVERAGE \
+LINE2:xxx#0000ff:"SysReCpuAct5Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReCpuMaxLd1Lat.name=vmware6.sys.resourceCpuMaxLimited1.latest
+report.vmware6.SysReCpuMaxLd1Lat.columns=SysReCpuMaxLd1Lat
+report.vmware6.SysReCpuMaxLd1Lat.propertiesValues=vmware6SysName
+report.vmware6.SysReCpuMaxLd1Lat.type=vmware6Sys
+report.vmware6.SysReCpuMaxLd1Lat.command=--title="VMware6 sys.resourceCpuMaxLimited1.latest {vmware6SysName}" \
+--vertical-label="SysReCpuMaxLd1Lat" \
+DEF:xxx={rrd1}:SysReCpuMaxLd1Lat:AVERAGE \
+LINE2:xxx#0000ff:"SysReCpuMaxLd1Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReCpuMaxLd5Lat.name=vmware6.sys.resourceCpuMaxLimited5.latest
+report.vmware6.SysReCpuMaxLd5Lat.columns=SysReCpuMaxLd5Lat
+report.vmware6.SysReCpuMaxLd5Lat.propertiesValues=vmware6SysName
+report.vmware6.SysReCpuMaxLd5Lat.type=vmware6Sys
+report.vmware6.SysReCpuMaxLd5Lat.command=--title="VMware6 sys.resourceCpuMaxLimited5.latest {vmware6SysName}" \
+--vertical-label="SysReCpuMaxLd5Lat" \
+DEF:xxx={rrd1}:SysReCpuMaxLd5Lat:AVERAGE \
+LINE2:xxx#0000ff:"SysReCpuMaxLd5Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReCpuRun1Lat.name=vmware6.sys.resourceCpuRun1.latest
+report.vmware6.SysReCpuRun1Lat.columns=SysReCpuRun1Lat
+report.vmware6.SysReCpuRun1Lat.propertiesValues=vmware6SysName
+report.vmware6.SysReCpuRun1Lat.type=vmware6Sys
+report.vmware6.SysReCpuRun1Lat.command=--title="VMware6 sys.resourceCpuRun1.latest {vmware6SysName}" \
+--vertical-label="SysReCpuRun1Lat" \
+DEF:xxx={rrd1}:SysReCpuRun1Lat:AVERAGE \
+LINE2:xxx#0000ff:"SysReCpuRun1Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReCpuRun5Lat.name=vmware6.sys.resourceCpuRun5.latest
+report.vmware6.SysReCpuRun5Lat.columns=SysReCpuRun5Lat
+report.vmware6.SysReCpuRun5Lat.propertiesValues=vmware6SysName
+report.vmware6.SysReCpuRun5Lat.type=vmware6Sys
+report.vmware6.SysReCpuRun5Lat.command=--title="VMware6 sys.resourceCpuRun5.latest {vmware6SysName}" \
+--vertical-label="SysReCpuRun5Lat" \
+DEF:xxx={rrd1}:SysReCpuRun5Lat:AVERAGE \
+LINE2:xxx#0000ff:"SysReCpuRun5Lat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReCpuUsageAvg.name=vmware6.sys.resourceCpuUsage.average
+report.vmware6.SysReCpuUsageAvg.columns=SysReCpuUsageAvg
+report.vmware6.SysReCpuUsageAvg.propertiesValues=vmware6SysName
+report.vmware6.SysReCpuUsageAvg.type=vmware6Sys
+report.vmware6.SysReCpuUsageAvg.command=--title="VMware6 sys.resourceCpuUsage.average {vmware6SysName}" \
+--vertical-label="SysReCpuUsageAvg" \
+DEF:xxx={rrd1}:SysReCpuUsageAvg:AVERAGE \
+LINE2:xxx#0000ff:"SysReCpuUsageAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReFdUsageLat.name=vmware6.sys.resourceFdUsage.latest
+report.vmware6.SysReFdUsageLat.columns=SysReFdUsageLat
+report.vmware6.SysReFdUsageLat.propertiesValues=vmware6SysName
+report.vmware6.SysReFdUsageLat.type=vmware6Sys
+report.vmware6.SysReFdUsageLat.command=--title="VMware6 sys.resourceFdUsage.latest {vmware6SysName}" \
+--vertical-label="SysReFdUsageLat" \
+DEF:xxx={rrd1}:SysReFdUsageLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReFdUsageLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemAcMaxLat.name=vmware6.sys.resourceMemAllocMax.latest
+report.vmware6.SysReMemAcMaxLat.columns=SysReMemAcMaxLat
+report.vmware6.SysReMemAcMaxLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemAcMaxLat.type=vmware6Sys
+report.vmware6.SysReMemAcMaxLat.command=--title="VMware6 sys.resourceMemAllocMax.latest {vmware6SysName}" \
+--vertical-label="SysReMemAcMaxLat" \
+DEF:xxx={rrd1}:SysReMemAcMaxLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemAcMaxLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemAcMinLat.name=vmware6.sys.resourceMemAllocMin.latest
+report.vmware6.SysReMemAcMinLat.columns=SysReMemAcMinLat
+report.vmware6.SysReMemAcMinLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemAcMinLat.type=vmware6Sys
+report.vmware6.SysReMemAcMinLat.command=--title="VMware6 sys.resourceMemAllocMin.latest {vmware6SysName}" \
+--vertical-label="SysReMemAcMinLat" \
+DEF:xxx={rrd1}:SysReMemAcMinLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemAcMinLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemAcSsLat.name=vmware6.sys.resourceMemAllocShares.latest
+report.vmware6.SysReMemAcSsLat.columns=SysReMemAcSsLat
+report.vmware6.SysReMemAcSsLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemAcSsLat.type=vmware6Sys
+report.vmware6.SysReMemAcSsLat.command=--title="VMware6 sys.resourceMemAllocShares.latest {vmware6SysName}" \
+--vertical-label="SysReMemAcSsLat" \
+DEF:xxx={rrd1}:SysReMemAcSsLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemAcSsLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemCdLat.name=vmware6.sys.resourceMemConsumed.latest
+report.vmware6.SysReMemCdLat.columns=SysReMemCdLat
+report.vmware6.SysReMemCdLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemCdLat.type=vmware6Sys
+report.vmware6.SysReMemCdLat.command=--title="VMware6 sys.resourceMemConsumed.latest {vmware6SysName}" \
+--vertical-label="SysReMemCdLat" \
+DEF:xxx={rrd1}:SysReMemCdLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemCdLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemCowLat.name=vmware6.sys.resourceMemCow.latest
+report.vmware6.SysReMemCowLat.columns=SysReMemCowLat
+report.vmware6.SysReMemCowLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemCowLat.type=vmware6Sys
+report.vmware6.SysReMemCowLat.command=--title="VMware6 sys.resourceMemCow.latest {vmware6SysName}" \
+--vertical-label="SysReMemCowLat" \
+DEF:xxx={rrd1}:SysReMemCowLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemCowLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemMappedLat.name=vmware6.sys.resourceMemMapped.latest
+report.vmware6.SysReMemMappedLat.columns=SysReMemMappedLat
+report.vmware6.SysReMemMappedLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemMappedLat.type=vmware6Sys
+report.vmware6.SysReMemMappedLat.command=--title="VMware6 sys.resourceMemMapped.latest {vmware6SysName}" \
+--vertical-label="SysReMemMappedLat" \
+DEF:xxx={rrd1}:SysReMemMappedLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemMappedLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemOdLat.name=vmware6.sys.resourceMemOverhead.latest
+report.vmware6.SysReMemOdLat.columns=SysReMemOdLat
+report.vmware6.SysReMemOdLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemOdLat.type=vmware6Sys
+report.vmware6.SysReMemOdLat.command=--title="VMware6 sys.resourceMemOverhead.latest {vmware6SysName}" \
+--vertical-label="SysReMemOdLat" \
+DEF:xxx={rrd1}:SysReMemOdLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemOdLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemSharedLat.name=vmware6.sys.resourceMemShared.latest
+report.vmware6.SysReMemSharedLat.columns=SysReMemSharedLat
+report.vmware6.SysReMemSharedLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemSharedLat.type=vmware6Sys
+report.vmware6.SysReMemSharedLat.command=--title="VMware6 sys.resourceMemShared.latest {vmware6SysName}" \
+--vertical-label="SysReMemSharedLat" \
+DEF:xxx={rrd1}:SysReMemSharedLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemSharedLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemSppedLat.name=vmware6.sys.resourceMemSwapped.latest
+report.vmware6.SysReMemSppedLat.columns=SysReMemSppedLat
+report.vmware6.SysReMemSppedLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemSppedLat.type=vmware6Sys
+report.vmware6.SysReMemSppedLat.command=--title="VMware6 sys.resourceMemSwapped.latest {vmware6SysName}" \
+--vertical-label="SysReMemSppedLat" \
+DEF:xxx={rrd1}:SysReMemSppedLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemSppedLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemTdLat.name=vmware6.sys.resourceMemTouched.latest
+report.vmware6.SysReMemTdLat.columns=SysReMemTdLat
+report.vmware6.SysReMemTdLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemTdLat.type=vmware6Sys
+report.vmware6.SysReMemTdLat.command=--title="VMware6 sys.resourceMemTouched.latest {vmware6SysName}" \
+--vertical-label="SysReMemTdLat" \
+DEF:xxx={rrd1}:SysReMemTdLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemTdLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.SysReMemZeroLat.name=vmware6.sys.resourceMemZero.latest
+report.vmware6.SysReMemZeroLat.columns=SysReMemZeroLat
+report.vmware6.SysReMemZeroLat.propertiesValues=vmware6SysName
+report.vmware6.SysReMemZeroLat.type=vmware6Sys
+report.vmware6.SysReMemZeroLat.command=--title="VMware6 sys.resourceMemZero.latest {vmware6SysName}" \
+--vertical-label="SysReMemZeroLat" \
+DEF:xxx={rrd1}:SysReMemZeroLat:AVERAGE \
+LINE2:xxx#0000ff:"SysReMemZeroLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetErrorsRxSum.name=vmware6.net.errorsRx.summation
+report.vmware6.NetErrorsRxSum.columns=NetErrorsRxSum
+report.vmware6.NetErrorsRxSum.propertiesValues=vmware6NetName
+report.vmware6.NetErrorsRxSum.type=vmware6Net
+report.vmware6.NetErrorsRxSum.command=--title="VMware6 net.errorsRx.summation {vmware6NetName}" \
+--vertical-label="NetErrorsRxSum" \
+DEF:xxx={rrd1}:NetErrorsRxSum:AVERAGE \
+LINE2:xxx#0000ff:"NetErrorsRxSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetErrorsTxSum.name=vmware6.net.errorsTx.summation
+report.vmware6.NetErrorsTxSum.columns=NetErrorsTxSum
+report.vmware6.NetErrorsTxSum.propertiesValues=vmware6NetName
+report.vmware6.NetErrorsTxSum.type=vmware6Net
+report.vmware6.NetErrorsTxSum.command=--title="VMware6 net.errorsTx.summation {vmware6NetName}" \
+--vertical-label="NetErrorsTxSum" \
+DEF:xxx={rrd1}:NetErrorsTxSum:AVERAGE \
+LINE2:xxx#0000ff:"NetErrorsTxSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.NetUnknownPsSum.name=vmware6.net.unknownProtos.summation
+report.vmware6.NetUnknownPsSum.columns=NetUnknownPsSum
+report.vmware6.NetUnknownPsSum.propertiesValues=vmware6NetName
+report.vmware6.NetUnknownPsSum.type=vmware6Net
+report.vmware6.NetUnknownPsSum.command=--title="VMware6 net.unknownProtos.summation {vmware6NetName}" \
+--vertical-label="NetUnknownPsSum" \
+DEF:xxx={rrd1}:NetUnknownPsSum:AVERAGE \
+LINE2:xxx#0000ff:"NetUnknownPsSum" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskDeLyAvg.name=vmware6.disk.deviceLatency.average
+report.vmware6.DiskDeLyAvg.columns=DiskDeLyAvg
+report.vmware6.DiskDeLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskDeLyAvg.type=vmware6Disk
+report.vmware6.DiskDeLyAvg.command=--title="VMware6 disk.deviceLatency.average {vmware6DiskName}" \
+--vertical-label="DiskDeLyAvg" \
+DEF:xxx={rrd1}:DiskDeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskDeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskDeRdLyAvg.name=vmware6.disk.deviceReadLatency.average
+report.vmware6.DiskDeRdLyAvg.columns=DiskDeRdLyAvg
+report.vmware6.DiskDeRdLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskDeRdLyAvg.type=vmware6Disk
+report.vmware6.DiskDeRdLyAvg.command=--title="VMware6 disk.deviceReadLatency.average {vmware6DiskName}" \
+--vertical-label="DiskDeRdLyAvg" \
+DEF:xxx={rrd1}:DiskDeRdLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskDeRdLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskDeWeLyAvg.name=vmware6.disk.deviceWriteLatency.average
+report.vmware6.DiskDeWeLyAvg.columns=DiskDeWeLyAvg
+report.vmware6.DiskDeWeLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskDeWeLyAvg.type=vmware6Disk
+report.vmware6.DiskDeWeLyAvg.command=--title="VMware6 disk.deviceWriteLatency.average {vmware6DiskName}" \
+--vertical-label="DiskDeWeLyAvg" \
+DEF:xxx={rrd1}:DiskDeWeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskDeWeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskKlLyAvg.name=vmware6.disk.kernelLatency.average
+report.vmware6.DiskKlLyAvg.columns=DiskKlLyAvg
+report.vmware6.DiskKlLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskKlLyAvg.type=vmware6Disk
+report.vmware6.DiskKlLyAvg.command=--title="VMware6 disk.kernelLatency.average {vmware6DiskName}" \
+--vertical-label="DiskKlLyAvg" \
+DEF:xxx={rrd1}:DiskKlLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskKlLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskKlRdLyAvg.name=vmware6.disk.kernelReadLatency.average
+report.vmware6.DiskKlRdLyAvg.columns=DiskKlRdLyAvg
+report.vmware6.DiskKlRdLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskKlRdLyAvg.type=vmware6Disk
+report.vmware6.DiskKlRdLyAvg.command=--title="VMware6 disk.kernelReadLatency.average {vmware6DiskName}" \
+--vertical-label="DiskKlRdLyAvg" \
+DEF:xxx={rrd1}:DiskKlRdLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskKlRdLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskKlWeLyAvg.name=vmware6.disk.kernelWriteLatency.average
+report.vmware6.DiskKlWeLyAvg.columns=DiskKlWeLyAvg
+report.vmware6.DiskKlWeLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskKlWeLyAvg.type=vmware6Disk
+report.vmware6.DiskKlWeLyAvg.command=--title="VMware6 disk.kernelWriteLatency.average {vmware6DiskName}" \
+--vertical-label="DiskKlWeLyAvg" \
+DEF:xxx={rrd1}:DiskKlWeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskKlWeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskMaxQeDhAvg.name=vmware6.disk.maxQueueDepth.average
+report.vmware6.DiskMaxQeDhAvg.columns=DiskMaxQeDhAvg
+report.vmware6.DiskMaxQeDhAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskMaxQeDhAvg.type=vmware6Disk
+report.vmware6.DiskMaxQeDhAvg.command=--title="VMware6 disk.maxQueueDepth.average {vmware6DiskName}" \
+--vertical-label="DiskMaxQeDhAvg" \
+DEF:xxx={rrd1}:DiskMaxQeDhAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskMaxQeDhAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskQeLyAvg.name=vmware6.disk.queueLatency.average
+report.vmware6.DiskQeLyAvg.columns=DiskQeLyAvg
+report.vmware6.DiskQeLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskQeLyAvg.type=vmware6Disk
+report.vmware6.DiskQeLyAvg.command=--title="VMware6 disk.queueLatency.average {vmware6DiskName}" \
+--vertical-label="DiskQeLyAvg" \
+DEF:xxx={rrd1}:DiskQeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskQeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskQeRdLyAvg.name=vmware6.disk.queueReadLatency.average
+report.vmware6.DiskQeRdLyAvg.columns=DiskQeRdLyAvg
+report.vmware6.DiskQeRdLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskQeRdLyAvg.type=vmware6Disk
+report.vmware6.DiskQeRdLyAvg.command=--title="VMware6 disk.queueReadLatency.average {vmware6DiskName}" \
+--vertical-label="DiskQeRdLyAvg" \
+DEF:xxx={rrd1}:DiskQeRdLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskQeRdLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskQeWeLyAvg.name=vmware6.disk.queueWriteLatency.average
+report.vmware6.DiskQeWeLyAvg.columns=DiskQeWeLyAvg
+report.vmware6.DiskQeWeLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskQeWeLyAvg.type=vmware6Disk
+report.vmware6.DiskQeWeLyAvg.command=--title="VMware6 disk.queueWriteLatency.average {vmware6DiskName}" \
+--vertical-label="DiskQeWeLyAvg" \
+DEF:xxx={rrd1}:DiskQeWeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskQeWeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskTlLyAvg.name=vmware6.disk.totalLatency.average
+report.vmware6.DiskTlLyAvg.columns=DiskTlLyAvg
+report.vmware6.DiskTlLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskTlLyAvg.type=vmware6Disk
+report.vmware6.DiskTlLyAvg.command=--title="VMware6 disk.totalLatency.average {vmware6DiskName}" \
+--vertical-label="DiskTlLyAvg" \
+DEF:xxx={rrd1}:DiskTlLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskTlLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskTlRdLyAvg.name=vmware6.disk.totalReadLatency.average
+report.vmware6.DiskTlRdLyAvg.columns=DiskTlRdLyAvg
+report.vmware6.DiskTlRdLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskTlRdLyAvg.type=vmware6Disk
+report.vmware6.DiskTlRdLyAvg.command=--title="VMware6 disk.totalReadLatency.average {vmware6DiskName}" \
+--vertical-label="DiskTlRdLyAvg" \
+DEF:xxx={rrd1}:DiskTlRdLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskTlRdLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DiskTlWeLyAvg.name=vmware6.disk.totalWriteLatency.average
+report.vmware6.DiskTlWeLyAvg.columns=DiskTlWeLyAvg
+report.vmware6.DiskTlWeLyAvg.propertiesValues=vmware6DiskName
+report.vmware6.DiskTlWeLyAvg.type=vmware6Disk
+report.vmware6.DiskTlWeLyAvg.command=--title="VMware6 disk.totalWriteLatency.average {vmware6DiskName}" \
+--vertical-label="DiskTlWeLyAvg" \
+DEF:xxx={rrd1}:DiskTlWeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DiskTlWeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.vflModNumAeVMDKsLat.name=vmware6.vflashModule.numActiveVMDKs.latest
+report.vmware6.vflModNumAeVMDKsLat.columns=vflModNumAeVMDKsLat
+report.vmware6.vflModNumAeVMDKsLat.propertiesValues=vmware6vflashModuleName
+report.vmware6.vflModNumAeVMDKsLat.type=vmware6vflashModule
+report.vmware6.vflModNumAeVMDKsLat.command=--title="VMware6 vflashModule.numActiveVMDKs.latest {vmware6vflashModuleName}" \
+--vertical-label="vflModNumAeVMDKsLat" \
+DEF:xxx={rrd1}:vflModNumAeVMDKsLat:AVERAGE \
+LINE2:xxx#0000ff:"vflModNumAeVMDKsLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeIopsAvg.name=vmware6.datastore.datastoreIops.average
+report.vmware6.DaStDeIopsAvg.columns=DaStDeIopsAvg
+report.vmware6.DaStDeIopsAvg.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeIopsAvg.type=vmware6DaSt
+report.vmware6.DaStDeIopsAvg.command=--title="VMware6 datastore.datastoreIops.average {vmware6DaStName}" \
+--vertical-label="DaStDeIopsAvg" \
+DEF:xxx={rrd1}:DaStDeIopsAvg:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeIopsAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeMaxQeDhLat.name=vmware6.datastore.datastoreMaxQueueDepth.latest
+report.vmware6.DaStDeMaxQeDhLat.columns=DaStDeMaxQeDhLat
+report.vmware6.DaStDeMaxQeDhLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeMaxQeDhLat.type=vmware6DaSt
+report.vmware6.DaStDeMaxQeDhLat.command=--title="VMware6 datastore.datastoreMaxQueueDepth.latest {vmware6DaStName}" \
+--vertical-label="DaStDeMaxQeDhLat" \
+DEF:xxx={rrd1}:DaStDeMaxQeDhLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeMaxQeDhLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeNlRdLyLat.name=vmware6.datastore.datastoreNormalReadLatency.latest
+report.vmware6.DaStDeNlRdLyLat.columns=DaStDeNlRdLyLat
+report.vmware6.DaStDeNlRdLyLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeNlRdLyLat.type=vmware6DaSt
+report.vmware6.DaStDeNlRdLyLat.command=--title="VMware6 datastore.datastoreNormalReadLatency.latest {vmware6DaStName}" \
+--vertical-label="DaStDeNlRdLyLat" \
+DEF:xxx={rrd1}:DaStDeNlRdLyLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeNlRdLyLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeNlWeLyLat.name=vmware6.datastore.datastoreNormalWriteLatency.latest
+report.vmware6.DaStDeNlWeLyLat.columns=DaStDeNlWeLyLat
+report.vmware6.DaStDeNlWeLyLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeNlWeLyLat.type=vmware6DaSt
+report.vmware6.DaStDeNlWeLyLat.command=--title="VMware6 datastore.datastoreNormalWriteLatency.latest {vmware6DaStName}" \
+--vertical-label="DaStDeNlWeLyLat" \
+DEF:xxx={rrd1}:DaStDeNlWeLyLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeNlWeLyLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeRdBytesLat.name=vmware6.datastore.datastoreReadBytes.latest
+report.vmware6.DaStDeRdBytesLat.columns=DaStDeRdBytesLat
+report.vmware6.DaStDeRdBytesLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeRdBytesLat.type=vmware6DaSt
+report.vmware6.DaStDeRdBytesLat.command=--title="VMware6 datastore.datastoreReadBytes.latest {vmware6DaStName}" \
+--vertical-label="DaStDeRdBytesLat" \
+DEF:xxx={rrd1}:DaStDeRdBytesLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeRdBytesLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeRdIopsLat.name=vmware6.datastore.datastoreReadIops.latest
+report.vmware6.DaStDeRdIopsLat.columns=DaStDeRdIopsLat
+report.vmware6.DaStDeRdIopsLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeRdIopsLat.type=vmware6DaSt
+report.vmware6.DaStDeRdIopsLat.command=--title="VMware6 datastore.datastoreReadIops.latest {vmware6DaStName}" \
+--vertical-label="DaStDeRdIopsLat" \
+DEF:xxx={rrd1}:DaStDeRdIopsLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeRdIopsLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeRdLdMcLat.name=vmware6.datastore.datastoreReadLoadMetric.latest
+report.vmware6.DaStDeRdLdMcLat.columns=DaStDeRdLdMcLat
+report.vmware6.DaStDeRdLdMcLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeRdLdMcLat.type=vmware6DaSt
+report.vmware6.DaStDeRdLdMcLat.command=--title="VMware6 datastore.datastoreReadLoadMetric.latest {vmware6DaStName}" \
+--vertical-label="DaStDeRdLdMcLat" \
+DEF:xxx={rrd1}:DaStDeRdLdMcLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeRdLdMcLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeRdOIOLat.name=vmware6.datastore.datastoreReadOIO.latest
+report.vmware6.DaStDeRdOIOLat.columns=DaStDeRdOIOLat
+report.vmware6.DaStDeRdOIOLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeRdOIOLat.type=vmware6DaSt
+report.vmware6.DaStDeRdOIOLat.command=--title="VMware6 datastore.datastoreReadOIO.latest {vmware6DaStName}" \
+--vertical-label="DaStDeRdOIOLat" \
+DEF:xxx={rrd1}:DaStDeRdOIOLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeRdOIOLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeVMOdLyLat.name=vmware6.datastore.datastoreVMObservedLatency.latest
+report.vmware6.DaStDeVMOdLyLat.columns=DaStDeVMOdLyLat
+report.vmware6.DaStDeVMOdLyLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeVMOdLyLat.type=vmware6DaSt
+report.vmware6.DaStDeVMOdLyLat.command=--title="VMware6 datastore.datastoreVMObservedLatency.latest {vmware6DaStName}" \
+--vertical-label="DaStDeVMOdLyLat" \
+DEF:xxx={rrd1}:DaStDeVMOdLyLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeVMOdLyLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeWeBytesLat.name=vmware6.datastore.datastoreWriteBytes.latest
+report.vmware6.DaStDeWeBytesLat.columns=DaStDeWeBytesLat
+report.vmware6.DaStDeWeBytesLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeWeBytesLat.type=vmware6DaSt
+report.vmware6.DaStDeWeBytesLat.command=--title="VMware6 datastore.datastoreWriteBytes.latest {vmware6DaStName}" \
+--vertical-label="DaStDeWeBytesLat" \
+DEF:xxx={rrd1}:DaStDeWeBytesLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeWeBytesLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeWeIopsLat.name=vmware6.datastore.datastoreWriteIops.latest
+report.vmware6.DaStDeWeIopsLat.columns=DaStDeWeIopsLat
+report.vmware6.DaStDeWeIopsLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeWeIopsLat.type=vmware6DaSt
+report.vmware6.DaStDeWeIopsLat.command=--title="VMware6 datastore.datastoreWriteIops.latest {vmware6DaStName}" \
+--vertical-label="DaStDeWeIopsLat" \
+DEF:xxx={rrd1}:DaStDeWeIopsLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeWeIopsLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeWeLdMcLat.name=vmware6.datastore.datastoreWriteLoadMetric.latest
+report.vmware6.DaStDeWeLdMcLat.columns=DaStDeWeLdMcLat
+report.vmware6.DaStDeWeLdMcLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeWeLdMcLat.type=vmware6DaSt
+report.vmware6.DaStDeWeLdMcLat.command=--title="VMware6 datastore.datastoreWriteLoadMetric.latest {vmware6DaStName}" \
+--vertical-label="DaStDeWeLdMcLat" \
+DEF:xxx={rrd1}:DaStDeWeLdMcLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeWeLdMcLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStDeWeOIOLat.name=vmware6.datastore.datastoreWriteOIO.latest
+report.vmware6.DaStDeWeOIOLat.columns=DaStDeWeOIOLat
+report.vmware6.DaStDeWeOIOLat.propertiesValues=vmware6DaStName
+report.vmware6.DaStDeWeOIOLat.type=vmware6DaSt
+report.vmware6.DaStDeWeOIOLat.command=--title="VMware6 datastore.datastoreWriteOIO.latest {vmware6DaStName}" \
+--vertical-label="DaStDeWeOIOLat" \
+DEF:xxx={rrd1}:DaStDeWeOIOLat:AVERAGE \
+LINE2:xxx#0000ff:"DaStDeWeOIOLat" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStSiocAeTePeAvg.name=vmware6.datastore.siocActiveTimePercentage.average
+report.vmware6.DaStSiocAeTePeAvg.columns=DaStSiocAeTePeAvg
+report.vmware6.DaStSiocAeTePeAvg.propertiesValues=vmware6DaStName
+report.vmware6.DaStSiocAeTePeAvg.type=vmware6DaSt
+report.vmware6.DaStSiocAeTePeAvg.command=--title="VMware6 datastore.siocActiveTimePercentage.average {vmware6DaStName}" \
+--vertical-label="DaStSiocAeTePeAvg" \
+DEF:xxx={rrd1}:DaStSiocAeTePeAvg:AVERAGE \
+LINE2:xxx#0000ff:"DaStSiocAeTePeAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
+
+report.vmware6.DaStSizeNdDeLyAvg.name=vmware6.datastore.sizeNormalizedDatastoreLatency.average
+report.vmware6.DaStSizeNdDeLyAvg.columns=DaStSizeNdDeLyAvg
+report.vmware6.DaStSizeNdDeLyAvg.propertiesValues=vmware6DaStName
+report.vmware6.DaStSizeNdDeLyAvg.type=vmware6DaSt
+report.vmware6.DaStSizeNdDeLyAvg.command=--title="VMware6 datastore.sizeNormalizedDatastoreLatency.average {vmware6DaStName}" \
+--vertical-label="DaStSizeNdDeLyAvg" \
+DEF:xxx={rrd1}:DaStSizeNdDeLyAvg:AVERAGE \
+LINE2:xxx#0000ff:"DaStSizeNdDeLyAvg" \
+GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s" \
+GPRINT:xxx:MIN:"Min \\: %8.2lf %s" \
+GPRINT:xxx:MAX:"Max \\: %8.2lf %s\\n"
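
All of the vmware6 reports added above instantiate one template, so a single annotated skeleton covers them. The following is an illustrative sketch, not part of the diff; NewMetric and vmware6NewType are hypothetical placeholders for the names used in the definitions above:

# Skeleton of an OpenNMS graph report definition (hypothetical names):
# .name    - title listed in the resource-graphs UI
# .columns - RRD data source the graph reads
# .propertiesValues / .type - set for per-instance resource types;
#            host-level reports use type=nodeSnmp and omit propertiesValues
# .command - arguments handed to rrdtool graph
report.vmware6.NewMetric.name=vmware6.group.metric.average
report.vmware6.NewMetric.columns=NewMetric
report.vmware6.NewMetric.propertiesValues=vmware6NewTypeName
report.vmware6.NewMetric.type=vmware6NewType
report.vmware6.NewMetric.command=--title="VMware6 group.metric.average {vmware6NewTypeName}" \
--vertical-label="NewMetric" \
DEF:xxx={rrd1}:NewMetric:AVERAGE \
LINE2:xxx#0000ff:"NewMetric" \
GPRINT:xxx:AVERAGE:"Avg \\: %8.2lf %s\\n"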
diff --git a/snmp-graph.properties.d/xmp-graph.properties b/snmp-graph.properties.d/xmp-graph.properties
index 9e06b76..1be5d8e 100644
--- a/snmp-graph.properties.d/xmp-graph.properties
+++ b/snmp-graph.properties.d/xmp-graph.properties
@@ -116,7 +116,7 @@ report.xmp.diskstats.command=--title="Disk Reads/Writes" \
LINE2:w#00ff00:"Disk Writes" \
GPRINT:w:AVERAGE:"Avg \\: %10.2lf %s" \
GPRINT:w:MIN:"Min \\: %10.2lf %s" \
- GPRINT:w:MAX:"Max \\: %10.2lf %s\\n"
+ GPRINT:w:MAX:"Max \\: %10.2lf %s\\n"
report.xmp.diskkb.name=Disk KB I/O
report.xmp.diskkb.columns=diskReadKB, diskWriteKB
@@ -131,7 +131,7 @@ report.xmp.diskkb.command=--title="Disk KB I/O" \
LINE2:w#00ff00:"Disk Write (KB)" \
GPRINT:w:AVERAGE:"Avg \\: %10.2lf %s" \
GPRINT:w:MIN:"Min \\: %10.2lf %s" \
- GPRINT:w:MAX:"Max \\: %10.2lf %s\\n"
+ GPRINT:w:MAX:"Max \\: %10.2lf %s\\n"
report.xmp.xmpdsz.name=Process Sizes
report.xmp.xmpdsz.suppress=xmp.xmpdrss
@@ -202,7 +202,7 @@ report.xmp.cpu.command=--title="Process CPU Time" \
LINE2:cputime#0000ff:"CPU Time" \
GPRINT:cputime:AVERAGE:"Avg \\: %5.2lf %s" \
GPRINT:cputime:MIN:"Min \\: %5.2lf %s" \
- GPRINT:cputime:MAX:"Max \\: %5.2lf %s\\n"
+ GPRINT:cputime:MAX:"Max \\: %5.2lf %s\\n"
report.xmp.bytes.name=Process I/O Bytes
report.xmp.bytes.columns=processReadBytes, processWriteBytes
diff --git a/startup.properties b/startup.properties
index 5e1ecc2..a0f9eae 100644
--- a/startup.properties
+++ b/startup.properties
@@ -28,10 +28,11 @@
#
# Startup core services like logging
#
-org/ops4j/pax/url/pax-url-aether/2.1.0/pax-url-aether-2.1.0.jar=5
-org/ops4j/pax/url/pax-url-wrap/2.1.0/pax-url-wrap-2.1.0-uber.jar=5
-org/ops4j/pax/logging/pax-logging-api/1.7.4/pax-logging-api-1.7.4.jar=8
-org/ops4j/pax/logging/pax-logging-service/1.7.4/pax-logging-service-1.7.4.jar=8
-org/apache/felix/org.apache.felix.configadmin/1.8.0/org.apache.felix.configadmin-1.8.0.jar=10
-org/apache/felix/org.apache.felix.fileinstall/3.4.2/org.apache.felix.fileinstall-3.4.2.jar=11
-org/apache/karaf/features/org.apache.karaf.features.core/2.4.0/org.apache.karaf.features.core-2.4.0.jar=15
+org/ops4j/pax/url/pax-url-aether/2.4.1/pax-url-aether-2.4.1.jar=5
+org/ops4j/pax/url/pax-url-wrap/2.4.1/pax-url-wrap-2.4.1-uber.jar=5
+org/ops4j/pax/logging/pax-logging-api/1.8.3/pax-logging-api-1.8.3.jar=8
+org/ops4j/pax/logging/pax-logging-service/1.8.3/pax-logging-service-1.8.3.jar=8
+org/apache/felix/org.apache.felix.configadmin/1.8.4/org.apache.felix.configadmin-1.8.4.jar=10
+org/apache/felix/org.apache.felix.fileinstall/3.5.0/org.apache.felix.fileinstall-3.5.0.jar=11
+org/apache/karaf/features/org.apache.karaf.features.core/2.4.3/org.apache.karaf.features.core-2.4.3.jar=15
+
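
Each startup.properties entry maps a bundle's path in the Maven repository layout to its OSGi start level, and lower levels start first: the pax-url handlers (5) and pax-logging (8) are up before Config Admin (10), File Install (11), and the Karaf features service (15). A minimal sketch of the entry syntax, using a made-up bundle:

# <maven-repo path to bundle jar>=<OSGi start level>; lower levels start earlier
org/example/example-bundle/1.0.0/example-bundle-1.0.0.jar=20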
diff --git a/statsd-configuration.xml b/statsd-configuration.xml
index a05a8b8..482aa6f 100644
--- a/statsd-configuration.xml
+++ b/statsd-configuration.xml
@@ -1,7 +1,7 @@
<?xml version="1.0"?>
<statistics-daemon-configuration
- xmlns:this="http://www.opennms.org/xsd/config/statsd"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:this="http://www.opennms.org/xsd/config/statsd"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opennms.org/xsd/config/statsd http://www.opennms.org/xsd/config/statistics-daemon-configuration.xsd ">
<!--
@@ -26,7 +26,19 @@
<parameter key="attributeMatch" value="ifInOctets"/>
</packageReport>
</package>
-
+
+ <package name="IOWAITReports">
+ <packageReport name="TopN_IOWait" description="Top 20 iowait across all nodes"
+ schedule="0 17 10 * * ?" retainInterval="2592000000"
+ status="off">
+ <parameter key="count" value="20"/>
+ <parameter key="consolidationFunction" value="AVERAGE"/>
+ <parameter key="relativeTime" value="YESTERDAY"/>
+ <parameter key="resourceTypeMatch" value="nodeSnmp"/>
+ <parameter key="attributeMatch" value="CpuRawWait"/>
+ </packageReport>
+ </package>
+
<package name="ResponseTimeReports">
<packageReport name="Top10_Response_Weekly" description="Weekly Top 10 responses across all nodes"
schedule="0 0 0 ? * MON" retainInterval="2592000000"
@@ -37,7 +49,7 @@
<parameter key="resourceTypeMatch" value="responseTime"/>
<parameter key="attributeMatch" value="icmp"/>
</packageReport>
-
+
<packageReport name="Top10_Response_This_Month" description="This Month Top 10 responses across all nodes"
schedule="0 0 0 L * ?" retainInterval="2592000000"
status="off">
@@ -47,7 +59,7 @@
<parameter key="resourceTypeMatch" value="responseTime"/>
<parameter key="attributeMatch" value="icmp"/>
</packageReport>
-
+
<packageReport name="Top10_Response_Last_Month" description="Last Month Top 10 responses across all nodes"
schedule="0 0 0 1 * ?" retainInterval="2592000000"
status="off">
@@ -57,7 +69,7 @@
<parameter key="resourceTypeMatch" value="responseTime"/>
<parameter key="attributeMatch" value="icmp"/>
</packageReport>
-
+
<packageReport name="Top10_Response_This_Year" description="This Year Top 10 responses across all nodes"
schedule="0 0 0 1 * ?" retainInterval="2592000000"
status="off">
@@ -68,9 +80,10 @@
<parameter key="attributeMatch" value="icmp"/>
</packageReport>
</package>
-
-
+
+
<report name="TopN_InOctets" class-name="org.opennms.netmgt.dao.support.TopNAttributeStatisticVisitor"/>
+ <report name="TopN_IOWait" class-name="org.opennms.netmgt.dao.support.TopNAttributeStatisticVisitor"/>
<report name="Top10_Response_Weekly" class-name="org.opennms.netmgt.dao.support.TopNAttributeStatisticVisitor"/>
<report name="Top10_Response_This_Month" class-name="org.opennms.netmgt.dao.support.TopNAttributeStatisticVisitor"/>
<report name="Top10_Response_Last_Month" class-name="org.opennms.netmgt.dao.support.TopNAttributeStatisticVisitor"/>
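
The new TopN_IOWait report ships disabled. Its Quartz schedule "0 17 10 * * ?" fires daily at 10:17:00, and retainInterval="2592000000" keeps results for 2592000000 / 86400000 ms per day = 30 days. A minimal sketch of enabling it, assuming nothing else should change (only the status attribute differs from the package above):

<packageReport name="TopN_IOWait" description="Top 20 iowait across all nodes"
               schedule="0 17 10 * * ?" retainInterval="2592000000"
               status="on">
  <parameter key="count" value="20"/>
  <parameter key="consolidationFunction" value="AVERAGE"/>
  <parameter key="relativeTime" value="YESTERDAY"/>
  <parameter key="resourceTypeMatch" value="nodeSnmp"/>
  <parameter key="attributeMatch" value="CpuRawWait"/>
</packageReport>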
diff --git a/trend-configuration.xml b/trend-configuration.xml
new file mode 100644
index 0000000..b0471df
--- /dev/null
+++ b/trend-configuration.xml
@@ -0,0 +1,233 @@
+<?xml version="1.0"?>
+<trend-configuration xmlns="http://xmlns.opennms.org/xsd/config/trend">
+
+ <trend-definition name="nodes">
+ <title>Nodes</title>
+ <subtitle>with Outages</subtitle>
+ <visible>true</visible>
+ <icon>glyphicon-fire</icon>
+ <trend-attributes>
+ <trend-attribute key="sparkWidth" value="100%"/>
+ <trend-attribute key="sparkHeight" value="35"/>
+ <trend-attribute key="sparkChartRangeMin" value="0"/>
+ <trend-attribute key="sparkLineColor" value="#8f5902"/>
+ <trend-attribute key="sparkLineWidth" value="1.5"/>
+ <trend-attribute key="sparkFillColor" value="#e9b96e"/>
+ <trend-attribute key="sparkSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkMinSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkMaxSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkSpotRadius" value="3"/>
+ <trend-attribute key="sparkHighlightSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkHighlightLineColor" value="#8f5902"/>
+ </trend-attributes>
+ <descriptionLink>outage/list.htm?outtype=current</descriptionLink>
+ <description>${intValue[23]} Nodes with Outage(s)</description>
+ <query>
+ <![CDATA[
+ select (
+ select count(distinct nodeid) from outages o, events e where e.eventid = o.svclosteventid and iflostservice < E and (ifregainedservice is null or ifregainedservice > E)
+ ) from (
+ select now() - interval '1 hour' * (O + 1) AS S, now() - interval '1 hour' * O as E from generate_series(0, 23) as O
+ ) I order by S;
+ ]]>
+ </query>
+ </trend-definition>
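
Every query in this new file uses the same windowing idiom: generate_series(0, 23) builds 24 one-hour buckets with bounds S and E, a correlated scalar subquery counts the matching rows per bucket, and ordering by S returns oldest first, so ${intValue[23]} always refers to the most recent hour. A standalone sketch of the pattern, lifted from the alarms-new definition below:

-- 24 hourly buckets, oldest first; the scalar subquery is evaluated per bucket
select (
  select count(*) from alarms where firsteventtime > S and firsteventtime <= E
) from (
  select now() - interval '1 hour' * (O + 1) as S,
         now() - interval '1 hour' * O       as E
  from generate_series(0, 23) as O
) I order by S;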
+
+ <trend-definition name="severity">
+ <title>Severity</title>
+ <subtitle>Distribution</subtitle>
+ <visible>true</visible>
+ <icon>glyphicon-dashboard</icon>
+ <trend-attributes>
+ <trend-attribute key="sparkType" value="pie"/>
+ <trend-attribute key="sparkHeight" value="35"/>
+ <trend-attribute key="sparkSliceColors" value="[#CC0000,#FF3300,#FF9900,#FFCC00,#999900,#336600,#999]"/>
+ </trend-attributes>
+ <descriptionLink>alarm/list.htm</descriptionLink>
+ <description>Go to Alarms Page</description>
+ <query>
+ <![CDATA[
+ select (
+ select count(*) from alarms where firsteventtime > now() - interval '24 hours' and severity = S
+ ) from (
+ select S from generate_series(1,7) as S
+ ) I order by S desc;
+ ]]>
+ </query>
+ </trend-definition>
+
+ <trend-definition name="alarms-new">
+ <title>Alarms</title>
+ <subtitle>Occurrence</subtitle>
+ <visible>true</visible>
+ <icon>glyphicon-bell</icon>
+ <trend-attributes>
+ <trend-attribute key="sparkWidth" value="100%"/>
+ <trend-attribute key="sparkType" value="bar"/>
+ <trend-attribute key="sparkBarColor" value="#8f5902"/>
+ <trend-attribute key="sparkHeight" value="35"/>
+ <trend-attribute key="sparkBarWidth" value="3"/>
+ <trend-attribute key="sparkBarSpacing" value="2"/>
+ </trend-attributes>
+ <descriptionLink>alarm/list.htm</descriptionLink>
+ <description>${intValue[23]} New Alarm(s)</description>
+ <query>
+ <![CDATA[
+ select (
+ select count(*) from alarms where firsteventtime > S and firsteventtime <= E
+ ) from (
+ select now() - interval '1 hour' * (O + 1) AS S, now() - interval '1 hour' * O as E from generate_series(0, 23) as O
+ ) I order by S;
+ ]]>
+ </query>
+ </trend-definition>
+
+ <trend-definition name="alarms-total">
+ <title>Alarms</title>
+ <subtitle>Unacknowledged</subtitle>
+ <visible>true</visible>
+ <icon>glyphicon-bell</icon>
+ <trend-attributes>
+ <trend-attribute key="sparkWidth" value="100%"/>
+ <trend-attribute key="sparkHeight" value="40"/>
+ <trend-attribute key="sparkChartRangeMin" value="0"/>
+ <trend-attribute key="sparkLineColor" value="#8f5902"/>
+ <trend-attribute key="sparkLineWidth" value="1.5"/>
+ <trend-attribute key="sparkFillColor" value="#e9b96e"/>
+ <trend-attribute key="sparkSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkMinSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkMaxSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkSpotRadius" value="3"/>
+ <trend-attribute key="sparkHighlightSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkHighlightLineColor" value="#8f5902"/>
+ </trend-attributes>
+ <descriptionLink>alarm/list.htm</descriptionLink>
+ <description>${intValue[23]} Unacknowledged Alarm(s)</description>
+ <query>
+ <![CDATA[
+ select (
+ select count(*) from alarms where (alarmacktime is null or alarmacktime > E) and firsteventtime <= E
+ ) from (
+ select now() - interval '1 hour' * (O + 1) AS S, now() - interval '1 hour' * O as E from generate_series(0, 23) as O
+ ) I order by S;
+ ]]>
+ </query>
+ </trend-definition>
+
+ <trend-definition name="outages-new">
+ <title>Outages</title>
+ <subtitle>Occurrence</subtitle>
+ <visible>true</visible>
+ <icon>glyphicon-exclamation-sign</icon>
+ <trend-attributes>
+ <trend-attribute key="sparkWidth" value="100%"/>
+ <trend-attribute key="sparkType" value="bar"/>
+ <trend-attribute key="sparkBarColor" value="#8f5902"/>
+ <trend-attribute key="sparkHeight" value="40"/>
+ <trend-attribute key="sparkBarWidth" value="3"/>
+ <trend-attribute key="sparkBarSpacing" value="2"/>
+ </trend-attributes>
+ <descriptionLink>outage/list.htm?outtype=both</descriptionLink>
+ <description>${intValue[23]} New Outage(s)</description>
+ <query>
+ <![CDATA[
+ select (
+ select count(*) from outages where iflostservice > S and iflostservice <= E
+ ) from (
+ select now() - interval '1 hour' * (O + 1) AS S, now() - interval '1 hour' * O as E from generate_series(0, 23) as O
+ ) I order by S;
+ ]]>
+ </query>
+ </trend-definition>
+
+ <trend-definition name="outages-total">
+ <title>Outages</title>
+ <subtitle>Current</subtitle>
+ <visible>true</visible>
+ <icon>glyphicon-exclamation-sign</icon>
+ <trend-attributes>
+ <trend-attribute key="sparkWidth" value="100%"/>
+ <trend-attribute key="sparkHeight" value="40"/>
+ <trend-attribute key="sparkChartRangeMin" value="0"/>
+ <trend-attribute key="sparkLineColor" value="#8f5902"/>
+ <trend-attribute key="sparkLineWidth" value="1.5"/>
+ <trend-attribute key="sparkFillColor" value="#e9b96e"/>
+ <trend-attribute key="sparkSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkMinSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkMaxSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkSpotRadius" value="3"/>
+ <trend-attribute key="sparkHighlightSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkHighlightLineColor" value="#8f5902"/>
+ </trend-attributes>
+ <descriptionLink>outage/list.htm?outtype=current</descriptionLink>
+ <description>${intValue[23]} Current Outage(s)</description>
+ <query>
+ <![CDATA[
+ select (
+ select count(*) from outages where iflostservice < E and (ifregainedservice is null or ifregainedservice > E)
+ ) from (
+ select now() - interval '1 hour' * (O + 1) AS S, now() - interval '1 hour' * O as E from generate_series(0, 23) as O
+ ) I order by S;
+ ]]>
+ </query>
+ </trend-definition>
+
+ <trend-definition name="bsm-new">
+ <title>Business Services</title>
+ <subtitle>Problem Occurrence</subtitle>
+ <visible>true</visible>
+ <icon>glyphicon-tasks</icon>
+ <trend-attributes>
+ <trend-attribute key="sparkWidth" value="100%"/>
+ <trend-attribute key="sparkType" value="bar"/>
+ <trend-attribute key="sparkBarColor" value="#8f5902"/>
+ <trend-attribute key="sparkHeight" value="40"/>
+ <trend-attribute key="sparkBarWidth" value="3"/>
+ <trend-attribute key="sparkBarSpacing" value="2"/>
+ </trend-attributes>
+ <descriptionLink>topology?szl=1&amp;layout=Hierarchy+Layout&amp;provider=Business+Services</descriptionLink>
+ <description>${intValue[23]} New Problem(s)</description>
+ <query>
+ <![CDATA[
+ select (
+ select count(*) from alarms where firsteventtime > S and firsteventtime <= E and eventuei='uei.opennms.org/bsm/serviceProblem'
+ ) from (
+ select now() - interval '1 hour' * (O + 1) AS S, now() - interval '1 hour' * O as E from generate_series(0, 23) as O
+ ) I order by S;
+ ]]>
+ </query>
+ </trend-definition>
+
+ <trend-definition name="bsm-total">
+ <title>Business Services</title>
+ <subtitle>Current Problems</subtitle>
+ <visible>true</visible>
+ <icon>glyphicon-tasks</icon>
+ <trend-attributes>
+ <trend-attribute key="sparkWidth" value="100%"/>
+ <trend-attribute key="sparkHeight" value="40"/>
+ <trend-attribute key="sparkChartRangeMin" value="0"/>
+ <trend-attribute key="sparkLineColor" value="#8f5902"/>
+ <trend-attribute key="sparkLineWidth" value="1.5"/>
+ <trend-attribute key="sparkFillColor" value="#e9b96e"/>
+ <trend-attribute key="sparkSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkMinSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkMaxSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkSpotRadius" value="3"/>
+ <trend-attribute key="sparkHighlightSpotColor" value="#8f5902"/>
+ <trend-attribute key="sparkHighlightLineColor" value="#8f5902"/>
+ </trend-attributes>
+ <descriptionLink>topology?szl=1&amp;layout=Hierarchy+Layout&amp;provider=Business+Services</descriptionLink>
+ <description>${intValue[23]} Current Problem(s)</description>
+ <query>
+ <![CDATA[
+ select (
+ select count(*) from alarms where (alarmacktime is null or alarmacktime > E) and firsteventtime <= E and eventuei='uei.opennms.org/bsm/serviceProblem'
+ ) from (
+ select now() - interval '1 hour' * (O + 1) AS S, now() - interval '1 hour' * O as E from generate_series(0, 23) as O
+ ) I order by S;
+ ]]>
+ </query>
+ </trend-definition>
+
+</trend-configuration>
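All of the hourly trend queries above share one pattern: generate_series(0, 23) produces 24 one-hour windows (S, E], with offset 0 ending at now(), and a correlated subselect counts the alarms or outages falling inside each window. A minimal sketch of just the window generation, runnable against any PostgreSQL database (no OpenNMS tables required):

    -- Emit the 24 hour-wide buckets the trend queries iterate over;
    -- O = 23 is the oldest window, O = 0 ends at now().
    select now() - interval '1 hour' * (O + 1) as S,
           now() - interval '1 hour' * O       as E
    from generate_series(0, 23) as O
    order by S;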
diff --git a/users.xml b/users.xml
index d5ada64..c919ffd 100644
--- a/users.xml
+++ b/users.xml
@@ -11,6 +11,14 @@
<full-name>Administrator</full-name>
<user-comments>Default administrator, do not delete</user-comments>
<password>21232F297A57A5A743894A0E4A801FC3</password>
+ <role>ROLE_ADMIN</role>
+ </user>
+ <user>
+ <user-id>rtc</user-id>
+ <full-name>RTC</full-name>
+ <user-comments>RTC user, do not delete</user-comments>
+ <password>68154466F81BFB532CD70F8C71426356</password>
+ <role>ROLE_RTC</role>
</user>
</users>
</userinfo>
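The password values in users.xml are unsalted MD5 digests in uppercase hex. The admin entry above can be verified with PostgreSQL's built-in md5() function (the new rtc entry uses the same scheme):

    -- 21232F297A57A5A743894A0E4A801FC3 is simply MD5('admin'), uppercased
    select upper(md5('admin'));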
diff --git a/vmware-datacollection-config.xml b/vmware-datacollection-config.xml
index 0b2cc03..6a93515 100644
--- a/vmware-datacollection-config.xml
+++ b/vmware-datacollection-config.xml
@@ -744,4 +744,383 @@
</vmware-group>
</vmware-groups>
</vmware-collection>
+
+ <!--
+ Configuration file generated for:
+
+ Full name.......: VMware vCenter Server 6.0.0 build-3339084
+ API type........: VirtualCenter
+ API version.....: 6.0
+ Product name....: VMware VirtualCenter Server
+ Product version.: 6.0
+ OS type.........: linux-x64
+ -->
+
+ <vmware-collection name="default-VirtualMachine6">
+ <rrd step="300">
+ <rra>RRA:AVERAGE:0.5:1:2016</rra>
+ <rra>RRA:AVERAGE:0.5:12:1488</rra>
+ <rra>RRA:AVERAGE:0.5:288:366</rra>
+ <rra>RRA:MAX:0.5:288:366</rra>
+ <rra>RRA:MIN:0.5:288:366</rra>
+ </rrd>
+ <vmware-groups>
+ <vmware-group name="vmware6VrtDisk" resourceType="vmware6VrtDisk">
+ <attrib name="virtualDisk.largeSeeks.latest" alias="VrtDiskLeSsLat" type="Gauge"/>
+ <attrib name="virtualDisk.mediumSeeks.latest" alias="VrtDiskMmSsLat" type="Gauge"/>
+ <attrib name="virtualDisk.numberReadAveraged.average" alias="VrtDiskNrRdAdAvg" type="Gauge"/>
+ <attrib name="virtualDisk.numberWriteAveraged.average" alias="VrtDiskNrWeAdAvg" type="Gauge"/>
+ <attrib name="virtualDisk.read.average" alias="VrtDiskRdAvg" type="Gauge"/>
+ <attrib name="virtualDisk.readIOSize.latest" alias="VrtDiskRdIOSizeLat" type="Gauge"/>
+ <attrib name="virtualDisk.readLoadMetric.latest" alias="VrtDiskRdLdMcLat" type="Gauge"/>
+ <attrib name="virtualDisk.readLatencyUS.latest" alias="VrtDiskRdLyUSLat" type="Gauge"/>
+ <attrib name="virtualDisk.readOIO.latest" alias="VrtDiskRdOIOLat" type="Gauge"/>
+ <attrib name="virtualDisk.smallSeeks.latest" alias="VrtDiskSlSsLat" type="Gauge"/>
+ <attrib name="virtualDisk.totalReadLatency.average" alias="VrtDiskTlRdLyAvg" type="Gauge"/>
+ <attrib name="virtualDisk.totalWriteLatency.average" alias="VrtDiskTlWeLyAvg" type="Gauge"/>
+ <attrib name="virtualDisk.write.average" alias="VrtDiskWeAvg" type="Gauge"/>
+ <attrib name="virtualDisk.writeIOSize.latest" alias="VrtDiskWeIOSizeLat" type="Gauge"/>
+ <attrib name="virtualDisk.writeLoadMetric.latest" alias="VrtDiskWeLdMcLat" type="Gauge"/>
+ <attrib name="virtualDisk.writeLatencyUS.latest" alias="VrtDiskWeLyUSLat" type="Gauge"/>
+ <attrib name="virtualDisk.writeOIO.latest" alias="VrtDiskWeOIOLat" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6Node" resourceType="Node">
+ <attrib name="cpu.demand.average" alias="CpuDemandAvg" type="Gauge"/>
+ <attrib name="cpu.demandEntitlementRatio.latest" alias="CpuDmdEntRatioLat" type="Gauge"/>
+ <attrib name="cpu.entitlement.latest" alias="CpuEntitlementLat" type="Gauge"/>
+ <attrib name="cpu.latency.average" alias="CpuLyAvg" type="Gauge"/>
+ <attrib name="cpu.overlap.summation" alias="CpuOverlapSum" type="Gauge"/>
+ <attrib name="cpu.readiness.average" alias="CpuRdinessAvg" type="Gauge"/>
+ <attrib name="cpu.swapwait.summation" alias="CpuSpwaitSum" type="Gauge"/>
+ <attrib name="cpu.usage.average" alias="CpuUsageAvg" type="Gauge"/>
+ <attrib name="cpu.usagemhz.average" alias="CpuUsagemhzAvg" type="Gauge"/>
+ <attrib name="cpu.used.summation" alias="CpuUsedSum" type="Gauge"/>
+ <attrib name="cpu.wait.summation" alias="CpuWaitSum" type="Gauge"/>
+ <attrib name="datastore.maxTotalLatency.latest" alias="DaStMaxTlLyLat" type="Gauge"/>
+ <attrib name="disk.maxTotalLatency.latest" alias="DiskMaxTlLyLat" type="Gauge"/>
+ <attrib name="disk.read.average" alias="DiskRdAvg" type="Gauge"/>
+ <attrib name="disk.usage.average" alias="DiskUsageAvg" type="Gauge"/>
+ <attrib name="disk.write.average" alias="DiskWeAvg" type="Gauge"/>
+ <attrib name="mem.active.average" alias="MemAeAvg" type="Gauge"/>
+ <attrib name="mem.activewrite.average" alias="MemAeWeAvg" type="Gauge"/>
+ <attrib name="mem.consumed.average" alias="MemCdAvg" type="Gauge"/>
+ <attrib name="mem.compressionRate.average" alias="MemCnReAvg" type="Gauge"/>
+ <attrib name="mem.compressed.average" alias="MemCompressedAvg" type="Gauge"/>
+ <attrib name="mem.decompressionRate.average" alias="MemDnReAvg" type="Gauge"/>
+ <attrib name="mem.entitlement.average" alias="MemEntitlementAvg" type="Gauge"/>
+ <attrib name="mem.granted.average" alias="MemGrantedAvg" type="Gauge"/>
+ <attrib name="mem.llSwapInRate.average" alias="MemLlSpInReAvg" type="Gauge"/>
+ <attrib name="mem.llSwapOutRate.average" alias="MemLlSpOutReAvg" type="Gauge"/>
+ <attrib name="mem.llSwapUsed.average" alias="MemLlSpUsedAvg" type="Gauge"/>
+ <attrib name="mem.latency.average" alias="MemLyAvg" type="Gauge"/>
+ <attrib name="mem.overhead.average" alias="MemOdAvg" type="Gauge"/>
+ <attrib name="mem.overheadMax.average" alias="MemOdMaxAvg" type="Gauge"/>
+ <attrib name="mem.overheadTouched.average" alias="MemOdTdAvg" type="Gauge"/>
+ <attrib name="mem.shared.average" alias="MemSharedAvg" type="Gauge"/>
+ <attrib name="mem.swaptarget.average" alias="MemSpTtAvg" type="Gauge"/>
+ <attrib name="mem.swapin.average" alias="MemSpinAvg" type="Gauge"/>
+ <attrib name="mem.swapinRate.average" alias="MemSpinReAvg" type="Gauge"/>
+ <attrib name="mem.swapout.average" alias="MemSpoutAvg" type="Gauge"/>
+ <attrib name="mem.swapoutRate.average" alias="MemSpoutReAvg" type="Gauge"/>
+ <attrib name="mem.swapped.average" alias="MemSppedAvg" type="Gauge"/>
+ <attrib name="mem.usage.average" alias="MemUsageAvg" type="Gauge"/>
+ <attrib name="mem.vmmemctl.average" alias="MemVmmemctlAvg" type="Gauge"/>
+ <attrib name="mem.vmmemctltarget.average" alias="MemVmmemctlTtAvg" type="Gauge"/>
+ <attrib name="mem.zero.average" alias="MemZeroAvg" type="Gauge"/>
+ <attrib name="mem.zipSaved.latest" alias="MemZipSavedLat" type="Gauge"/>
+ <attrib name="mem.zipped.latest" alias="MemZippedLat" type="Gauge"/>
+ <attrib name="net.broadcastRx.summation" alias="NetBroadcastRxSum" type="Gauge"/>
+ <attrib name="net.broadcastTx.summation" alias="NetBroadcastTxSum" type="Gauge"/>
+ <attrib name="net.droppedRx.summation" alias="NetDroppedRxSum" type="Gauge"/>
+ <attrib name="net.droppedTx.summation" alias="NetDroppedTxSum" type="Gauge"/>
+ <attrib name="net.packetsTx.summation" alias="NetPacketsTxSum" type="Gauge"/>
+ <attrib name="net.received.average" alias="NetReceivedAvg" type="Gauge"/>
+ <attrib name="net.transmitted.average" alias="NetTransmittedAvg" type="Gauge"/>
+ <attrib name="power.energy.summation" alias="PowerEnergySum" type="Gauge"/>
+ <attrib name="power.power.average" alias="PowerPowerAvg" type="Gauge"/>
+ <attrib name="rescpu.actav15.latest" alias="ResCpuActav15Lat" type="Gauge"/>
+ <attrib name="rescpu.actav1.latest" alias="ResCpuActav1Lat" type="Gauge"/>
+ <attrib name="rescpu.actav5.latest" alias="ResCpuActav5Lat" type="Gauge"/>
+ <attrib name="rescpu.actpk15.latest" alias="ResCpuActpk15Lat" type="Gauge"/>
+ <attrib name="rescpu.actpk1.latest" alias="ResCpuActpk1Lat" type="Gauge"/>
+ <attrib name="rescpu.actpk5.latest" alias="ResCpuActpk5Lat" type="Gauge"/>
+ <attrib name="rescpu.maxLimited15.latest" alias="ResCpuMaxLd15Lat" type="Gauge"/>
+ <attrib name="rescpu.maxLimited1.latest" alias="ResCpuMaxLd1Lat" type="Gauge"/>
+ <attrib name="rescpu.maxLimited5.latest" alias="ResCpuMaxLd5Lat" type="Gauge"/>
+ <attrib name="rescpu.runav15.latest" alias="ResCpuRunav15Lat" type="Gauge"/>
+ <attrib name="rescpu.runav1.latest" alias="ResCpuRunav1Lat" type="Gauge"/>
+ <attrib name="rescpu.runav5.latest" alias="ResCpuRunav5Lat" type="Gauge"/>
+ <attrib name="rescpu.runpk15.latest" alias="ResCpuRunpk15Lat" type="Gauge"/>
+ <attrib name="rescpu.runpk1.latest" alias="ResCpuRunpk1Lat" type="Gauge"/>
+ <attrib name="rescpu.runpk5.latest" alias="ResCpuRunpk5Lat" type="Gauge"/>
+ <attrib name="rescpu.sampleCount.latest" alias="ResCpuSeCtLat" type="Gauge"/>
+ <attrib name="rescpu.samplePeriod.latest" alias="ResCpuSePeriodLat" type="Gauge"/>
+ <attrib name="sys.heartbeat.latest" alias="SysHeartbeatLat" type="Gauge"/>
+ <attrib name="sys.osUptime.latest" alias="SysOsUpTeLat" type="Gauge"/>
+ <attrib name="sys.uptime.latest" alias="SysUpTeLat" type="Gauge"/>
+ <attrib name="virtualDisk.read.average" alias="VrtDiskRdAvg" type="Gauge"/>
+ <attrib name="virtualDisk.write.average" alias="VrtDiskWeAvg" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6Cpu" resourceType="vmware6Cpu">
+ <attrib name="cpu.costop.summation" alias="CpuCostopSum" type="Gauge"/>
+ <attrib name="cpu.idle.summation" alias="CpuIdleSum" type="Gauge"/>
+ <attrib name="cpu.maxlimited.summation" alias="CpuMaxLdSum" type="Gauge"/>
+ <attrib name="cpu.overlap.summation" alias="CpuOverlapSum" type="Gauge"/>
+ <attrib name="cpu.readiness.average" alias="CpuRdinessAvg" type="Gauge"/>
+ <attrib name="cpu.ready.summation" alias="CpuRdySum" type="Gauge"/>
+ <attrib name="cpu.run.summation" alias="CpuRunSum" type="Gauge"/>
+ <attrib name="cpu.swapwait.summation" alias="CpuSpwaitSum" type="Gauge"/>
+ <attrib name="cpu.system.summation" alias="CpuSystemSum" type="Gauge"/>
+ <attrib name="cpu.usagemhz.average" alias="CpuUsagemhzAvg" type="Gauge"/>
+ <attrib name="cpu.used.summation" alias="CpuUsedSum" type="Gauge"/>
+ <attrib name="cpu.wait.summation" alias="CpuWaitSum" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6Net" resourceType="vmware6Net">
+ <attrib name="net.broadcastRx.summation" alias="NetBroadcastRxSum" type="Gauge"/>
+ <attrib name="net.broadcastTx.summation" alias="NetBroadcastTxSum" type="Gauge"/>
+ <attrib name="net.bytesRx.average" alias="NetBytesRxAvg" type="Gauge"/>
+ <attrib name="net.bytesTx.average" alias="NetBytesTxAvg" type="Gauge"/>
+ <attrib name="net.droppedRx.summation" alias="NetDroppedRxSum" type="Gauge"/>
+ <attrib name="net.droppedTx.summation" alias="NetDroppedTxSum" type="Gauge"/>
+ <attrib name="net.multicastRx.summation" alias="NetMulticastRxSum" type="Gauge"/>
+ <attrib name="net.multicastTx.summation" alias="NetMulticastTxSum" type="Gauge"/>
+ <attrib name="net.packetsRx.summation" alias="NetPacketsRxSum" type="Gauge"/>
+ <attrib name="net.packetsTx.summation" alias="NetPacketsTxSum" type="Gauge"/>
+ <attrib name="net.received.average" alias="NetReceivedAvg" type="Gauge"/>
+ <attrib name="net.transmitted.average" alias="NetTransmittedAvg" type="Gauge"/>
+ <attrib name="net.usage.average" alias="NetUsageAvg" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6Disk" resourceType="vmware6Disk">
+ <attrib name="disk.busResets.summation" alias="DiskBusResetsSum" type="Gauge"/>
+ <attrib name="disk.commandsAveraged.average" alias="DiskCsAdAvg" type="Gauge"/>
+ <attrib name="disk.commandsAborted.summation" alias="DiskCsAdSum" type="Gauge"/>
+ <attrib name="disk.commands.summation" alias="DiskCsSum" type="Gauge"/>
+ <attrib name="disk.numberReadAveraged.average" alias="DiskNrRdAdAvg" type="Gauge"/>
+ <attrib name="disk.numberRead.summation" alias="DiskNrRdSum" type="Gauge"/>
+ <attrib name="disk.numberWriteAveraged.average" alias="DiskNrWeAdAvg" type="Gauge"/>
+ <attrib name="disk.numberWrite.summation" alias="DiskNrWeSum" type="Gauge"/>
+ <attrib name="disk.read.average" alias="DiskRdAvg" type="Gauge"/>
+ <attrib name="disk.write.average" alias="DiskWeAvg" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6DaSt" resourceType="vmware6DaSt">
+ <attrib name="datastore.numberReadAveraged.average" alias="DaStNrRdAdAvg" type="Gauge"/>
+ <attrib name="datastore.numberWriteAveraged.average" alias="DaStNrWeAdAvg" type="Gauge"/>
+ <attrib name="datastore.read.average" alias="DaStRdAvg" type="Gauge"/>
+ <attrib name="datastore.totalReadLatency.average" alias="DaStTlRdLyAvg" type="Gauge"/>
+ <attrib name="datastore.totalWriteLatency.average" alias="DaStTlWeLyAvg" type="Gauge"/>
+ <attrib name="datastore.write.average" alias="DaStWeAvg" type="Gauge"/>
+ </vmware-group>
+ </vmware-groups>
+ </vmware-collection>
+ <vmware-collection name="default-HostSystem6">
+ <rrd step="300">
+ <rra>RRA:AVERAGE:0.5:1:2016</rra>
+ <rra>RRA:AVERAGE:0.5:12:1488</rra>
+ <rra>RRA:AVERAGE:0.5:288:366</rra>
+ <rra>RRA:MAX:0.5:288:366</rra>
+ <rra>RRA:MIN:0.5:288:366</rra>
+ </rrd>
+ <vmware-groups>
+ <vmware-group name="vmware6StAdptr" resourceType="vmware6StAdptr">
+ <attrib name="storageAdapter.commandsAveraged.average" alias="StAdptrCsAdAvg" type="Gauge"/>
+ <attrib name="storageAdapter.numberReadAveraged.average" alias="StAdptrNrRdAdAvg" type="Gauge"/>
+ <attrib name="storageAdapter.numberWriteAveraged.average" alias="StAdptrNrWeAdAvg" type="Gauge"/>
+ <attrib name="storageAdapter.read.average" alias="StAdptrRdAvg" type="Gauge"/>
+ <attrib name="storageAdapter.totalReadLatency.average" alias="StAdptrTlRdLyAvg" type="Gauge"/>
+ <attrib name="storageAdapter.totalWriteLatency.average" alias="StAdptrTlWeLyAvg" type="Gauge"/>
+ <attrib name="storageAdapter.write.average" alias="StAdptrWeAvg" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6StPth" resourceType="vmware6StPth">
+ <attrib name="storagePath.commandsAveraged.average" alias="StPthCsAdAvg" type="Gauge"/>
+ <attrib name="storagePath.numberReadAveraged.average" alias="StPthNrRdAdAvg" type="Gauge"/>
+ <attrib name="storagePath.numberWriteAveraged.average" alias="StPthNrWeAdAvg" type="Gauge"/>
+ <attrib name="storagePath.read.average" alias="StPthRdAvg" type="Gauge"/>
+ <attrib name="storagePath.totalReadLatency.average" alias="StPthTlRdLyAvg" type="Gauge"/>
+ <attrib name="storagePath.totalWriteLatency.average" alias="StPthTlWeLyAvg" type="Gauge"/>
+ <attrib name="storagePath.write.average" alias="StPthWeAvg" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6Node" resourceType="Node">
+ <attrib name="cpu.costop.summation" alias="CpuCostopSum" type="Gauge"/>
+ <attrib name="cpu.demand.average" alias="CpuDemandAvg" type="Gauge"/>
+ <attrib name="cpu.latency.average" alias="CpuLyAvg" type="Gauge"/>
+ <attrib name="cpu.reservedCapacity.average" alias="CpuRdCyAvg" type="Gauge"/>
+ <attrib name="cpu.readiness.average" alias="CpuRdinessAvg" type="Gauge"/>
+ <attrib name="cpu.ready.summation" alias="CpuRdySum" type="Gauge"/>
+ <attrib name="cpu.swapwait.summation" alias="CpuSpwaitSum" type="Gauge"/>
+ <attrib name="cpu.totalCapacity.average" alias="CpuTlCyAvg" type="Gauge"/>
+ <attrib name="cpu.usagemhz.average" alias="CpuUsagemhzAvg" type="Gauge"/>
+ <attrib name="cpu.wait.summation" alias="CpuWaitSum" type="Gauge"/>
+ <attrib name="datastore.maxTotalLatency.latest" alias="DaStMaxTlLyLat" type="Gauge"/>
+ <attrib name="disk.maxTotalLatency.latest" alias="DiskMaxTlLyLat" type="Gauge"/>
+ <attrib name="disk.usage.average" alias="DiskUsageAvg" type="Gauge"/>
+ <attrib name="hbr.hbrNetRx.average" alias="HbrHbrNetRxAvg" type="Gauge"/>
+ <attrib name="hbr.hbrNetTx.average" alias="HbrHbrNetTxAvg" type="Gauge"/>
+ <attrib name="hbr.hbrNumVms.average" alias="HbrHbrNumVmsAvg" type="Gauge"/>
+ <attrib name="mem.active.average" alias="MemAeAvg" type="Gauge"/>
+ <attrib name="mem.activewrite.average" alias="MemAeWeAvg" type="Gauge"/>
+ <attrib name="mem.consumed.average" alias="MemCdAvg" type="Gauge"/>
+ <attrib name="mem.compressionRate.average" alias="MemCnReAvg" type="Gauge"/>
+ <attrib name="mem.compressed.average" alias="MemCompressedAvg" type="Gauge"/>
+ <attrib name="mem.decompressionRate.average" alias="MemDnReAvg" type="Gauge"/>
+ <attrib name="mem.granted.average" alias="MemGrantedAvg" type="Gauge"/>
+ <attrib name="mem.heap.average" alias="MemHeapAvg" type="Gauge"/>
+ <attrib name="mem.heapfree.average" alias="MemHeapfreeAvg" type="Gauge"/>
+ <attrib name="mem.llSwapIn.average" alias="MemLlSpInAvg" type="Gauge"/>
+ <attrib name="mem.llSwapInRate.average" alias="MemLlSpInReAvg" type="Gauge"/>
+ <attrib name="mem.llSwapOut.average" alias="MemLlSpOutAvg" type="Gauge"/>
+ <attrib name="mem.llSwapOutRate.average" alias="MemLlSpOutReAvg" type="Gauge"/>
+ <attrib name="mem.llSwapUsed.average" alias="MemLlSpUsedAvg" type="Gauge"/>
+ <attrib name="mem.lowfreethreshold.average" alias="MemLowfreeTdAvg" type="Gauge"/>
+ <attrib name="mem.latency.average" alias="MemLyAvg" type="Gauge"/>
+ <attrib name="mem.overhead.average" alias="MemOdAvg" type="Gauge"/>
+ <attrib name="mem.reservedCapacity.average" alias="MemRdCyAvg" type="Gauge"/>
+ <attrib name="mem.shared.average" alias="MemSharedAvg" type="Gauge"/>
+ <attrib name="mem.sharedcommon.average" alias="MemSharedcommonAvg" type="Gauge"/>
+ <attrib name="mem.swapin.average" alias="MemSpinAvg" type="Gauge"/>
+ <attrib name="mem.swapinRate.average" alias="MemSpinReAvg" type="Gauge"/>
+ <attrib name="mem.swapout.average" alias="MemSpoutAvg" type="Gauge"/>
+ <attrib name="mem.swapoutRate.average" alias="MemSpoutReAvg" type="Gauge"/>
+ <attrib name="mem.swapused.average" alias="MemSpusedAvg" type="Gauge"/>
+ <attrib name="mem.state.latest" alias="MemStateLat" type="Gauge"/>
+ <attrib name="mem.sysUsage.average" alias="MemSysUsageAvg" type="Gauge"/>
+ <attrib name="mem.totalCapacity.average" alias="MemTlCyAvg" type="Gauge"/>
+ <attrib name="mem.unreserved.average" alias="MemUdAvg" type="Gauge"/>
+ <attrib name="mem.usage.average" alias="MemUsageAvg" type="Gauge"/>
+ <attrib name="mem.vmfs.pbc.overhead.latest" alias="MemVmfsPbcOdLat" type="Gauge"/>
+ <attrib name="mem.vmfs.pbc.capMissRatio.latest" alias="MemVmfsPbcCpMsRtiLt" type="Gauge"/>
+ <attrib name="mem.vmfs.pbc.size.latest" alias="MemVmfsPbcSizeLat" type="Gauge"/>
+ <attrib name="mem.vmfs.pbc.sizeMax.latest" alias="MemVmfsPbcSizMaxLat" type="Gauge"/>
+ <attrib name="mem.vmfs.pbc.workingSet.latest" alias="MemVmfsPbcWrkSetLat" type="Gauge"/>
+ <attrib name="mem.vmfs.pbc.workingSetMax.latest" alias="MemVmfsPbcWrkStMxLt" type="Gauge"/>
+ <attrib name="mem.vmmemctl.average" alias="MemVmmemctlAvg" type="Gauge"/>
+ <attrib name="mem.zero.average" alias="MemZeroAvg" type="Gauge"/>
+ <attrib name="net.broadcastTx.summation" alias="NetBroadcastTxSum" type="Gauge"/>
+ <attrib name="net.droppedRx.summation" alias="NetDroppedRxSum" type="Gauge"/>
+ <attrib name="net.multicastRx.summation" alias="NetMulticastRxSum" type="Gauge"/>
+ <attrib name="net.received.average" alias="NetReceivedAvg" type="Gauge"/>
+ <attrib name="power.energy.summation" alias="PowerEnergySum" type="Gauge"/>
+ <attrib name="power.power.average" alias="PowerPowerAvg" type="Gauge"/>
+ <attrib name="power.powerCap.average" alias="PowerPowerCapAvg" type="Gauge"/>
+ <attrib name="rescpu.actav15.latest" alias="ResCpuActav15Lat" type="Gauge"/>
+ <attrib name="rescpu.actav1.latest" alias="ResCpuActav1Lat" type="Gauge"/>
+ <attrib name="rescpu.actav5.latest" alias="ResCpuActav5Lat" type="Gauge"/>
+ <attrib name="rescpu.actpk15.latest" alias="ResCpuActpk15Lat" type="Gauge"/>
+ <attrib name="rescpu.actpk1.latest" alias="ResCpuActpk1Lat" type="Gauge"/>
+ <attrib name="rescpu.actpk5.latest" alias="ResCpuActpk5Lat" type="Gauge"/>
+ <attrib name="rescpu.maxLimited15.latest" alias="ResCpuMaxLd15Lat" type="Gauge"/>
+ <attrib name="rescpu.maxLimited1.latest" alias="ResCpuMaxLd1Lat" type="Gauge"/>
+ <attrib name="rescpu.maxLimited5.latest" alias="ResCpuMaxLd5Lat" type="Gauge"/>
+ <attrib name="rescpu.runav15.latest" alias="ResCpuRunav15Lat" type="Gauge"/>
+ <attrib name="rescpu.runav1.latest" alias="ResCpuRunav1Lat" type="Gauge"/>
+ <attrib name="rescpu.runav5.latest" alias="ResCpuRunav5Lat" type="Gauge"/>
+ <attrib name="rescpu.runpk15.latest" alias="ResCpuRunpk15Lat" type="Gauge"/>
+ <attrib name="rescpu.runpk1.latest" alias="ResCpuRunpk1Lat" type="Gauge"/>
+ <attrib name="rescpu.runpk5.latest" alias="ResCpuRunpk5Lat" type="Gauge"/>
+ <attrib name="rescpu.sampleCount.latest" alias="ResCpuSeCtLat" type="Gauge"/>
+ <attrib name="rescpu.samplePeriod.latest" alias="ResCpuSePeriodLat" type="Gauge"/>
+ <attrib name="storageAdapter.maxTotalLatency.latest" alias="StAdptrMaxTlLyLat" type="Gauge"/>
+ <attrib name="storagePath.maxTotalLatency.latest" alias="StPthMaxTlLyLat" type="Gauge"/>
+ <attrib name="sys.uptime.latest" alias="SysUpTeLat" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6Cpu" resourceType="vmware6Cpu">
+ <attrib name="cpu.coreUtilization.average" alias="CpuCoreUnAvg" type="Gauge"/>
+ <attrib name="cpu.idle.summation" alias="CpuIdleSum" type="Gauge"/>
+ <attrib name="cpu.utilization.average" alias="CpuUnAvg" type="Gauge"/>
+ <attrib name="cpu.usage.average" alias="CpuUsageAvg" type="Gauge"/>
+ <attrib name="cpu.used.summation" alias="CpuUsedSum" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6Sys" resourceType="vmware6Sys">
+ <attrib name="sys.resourceCpuAllocMin.latest" alias="SysReCpuAcMinLat" type="Gauge"/>
+ <attrib name="sys.resourceCpuAllocShares.latest" alias="SysReCpuAcSsLat" type="Gauge"/>
+ <attrib name="sys.resourceCpuAct1.latest" alias="SysReCpuAct1Lat" type="Gauge"/>
+ <attrib name="sys.resourceCpuAct5.latest" alias="SysReCpuAct5Lat" type="Gauge"/>
+ <attrib name="sys.resourceCpuMaxLimited1.latest" alias="SysReCpuMaxLd1Lat" type="Gauge"/>
+ <attrib name="sys.resourceCpuMaxLimited5.latest" alias="SysReCpuMaxLd5Lat" type="Gauge"/>
+ <attrib name="sys.resourceCpuRun1.latest" alias="SysReCpuRun1Lat" type="Gauge"/>
+ <attrib name="sys.resourceCpuRun5.latest" alias="SysReCpuRun5Lat" type="Gauge"/>
+ <attrib name="sys.resourceCpuUsage.average" alias="SysReCpuUsageAvg" type="Gauge"/>
+ <attrib name="sys.resourceFdUsage.latest" alias="SysReFdUsageLat" type="Gauge"/>
+ <attrib name="sys.resourceMemAllocMax.latest" alias="SysReMemAcMaxLat" type="Gauge"/>
+ <attrib name="sys.resourceMemAllocMin.latest" alias="SysReMemAcMinLat" type="Gauge"/>
+ <attrib name="sys.resourceMemAllocShares.latest" alias="SysReMemAcSsLat" type="Gauge"/>
+ <attrib name="sys.resourceMemConsumed.latest" alias="SysReMemCdLat" type="Gauge"/>
+ <attrib name="sys.resourceMemCow.latest" alias="SysReMemCowLat" type="Gauge"/>
+ <attrib name="sys.resourceMemMapped.latest" alias="SysReMemMappedLat" type="Gauge"/>
+ <attrib name="sys.resourceMemOverhead.latest" alias="SysReMemOdLat" type="Gauge"/>
+ <attrib name="sys.resourceMemShared.latest" alias="SysReMemSharedLat" type="Gauge"/>
+ <attrib name="sys.resourceMemSwapped.latest" alias="SysReMemSppedLat" type="Gauge"/>
+ <attrib name="sys.resourceMemTouched.latest" alias="SysReMemTdLat" type="Gauge"/>
+ <attrib name="sys.resourceMemZero.latest" alias="SysReMemZeroLat" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6Net" resourceType="vmware6Net">
+ <attrib name="net.broadcastRx.summation" alias="NetBroadcastRxSum" type="Gauge"/>
+ <attrib name="net.broadcastTx.summation" alias="NetBroadcastTxSum" type="Gauge"/>
+ <attrib name="net.bytesRx.average" alias="NetBytesRxAvg" type="Gauge"/>
+ <attrib name="net.bytesTx.average" alias="NetBytesTxAvg" type="Gauge"/>
+ <attrib name="net.droppedRx.summation" alias="NetDroppedRxSum" type="Gauge"/>
+ <attrib name="net.droppedTx.summation" alias="NetDroppedTxSum" type="Gauge"/>
+ <attrib name="net.errorsRx.summation" alias="NetErrorsRxSum" type="Gauge"/>
+ <attrib name="net.errorsTx.summation" alias="NetErrorsTxSum" type="Gauge"/>
+ <attrib name="net.multicastRx.summation" alias="NetMulticastRxSum" type="Gauge"/>
+ <attrib name="net.multicastTx.summation" alias="NetMulticastTxSum" type="Gauge"/>
+ <attrib name="net.packetsRx.summation" alias="NetPacketsRxSum" type="Gauge"/>
+ <attrib name="net.packetsTx.summation" alias="NetPacketsTxSum" type="Gauge"/>
+ <attrib name="net.received.average" alias="NetReceivedAvg" type="Gauge"/>
+ <attrib name="net.transmitted.average" alias="NetTransmittedAvg" type="Gauge"/>
+ <attrib name="net.unknownProtos.summation" alias="NetUnknownPsSum" type="Gauge"/>
+ <attrib name="net.usage.average" alias="NetUsageAvg" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6Disk" resourceType="vmware6Disk">
+ <attrib name="disk.busResets.summation" alias="DiskBusResetsSum" type="Gauge"/>
+ <attrib name="disk.commandsAveraged.average" alias="DiskCsAdAvg" type="Gauge"/>
+ <attrib name="disk.commandsAborted.summation" alias="DiskCsAdSum" type="Gauge"/>
+ <attrib name="disk.commands.summation" alias="DiskCsSum" type="Gauge"/>
+ <attrib name="disk.deviceLatency.average" alias="DiskDeLyAvg" type="Gauge"/>
+ <attrib name="disk.deviceReadLatency.average" alias="DiskDeRdLyAvg" type="Gauge"/>
+ <attrib name="disk.deviceWriteLatency.average" alias="DiskDeWeLyAvg" type="Gauge"/>
+ <attrib name="disk.kernelLatency.average" alias="DiskKlLyAvg" type="Gauge"/>
+ <attrib name="disk.kernelReadLatency.average" alias="DiskKlRdLyAvg" type="Gauge"/>
+ <attrib name="disk.kernelWriteLatency.average" alias="DiskKlWeLyAvg" type="Gauge"/>
+ <attrib name="disk.maxQueueDepth.average" alias="DiskMaxQeDhAvg" type="Gauge"/>
+ <attrib name="disk.numberReadAveraged.average" alias="DiskNrRdAdAvg" type="Gauge"/>
+ <attrib name="disk.numberRead.summation" alias="DiskNrRdSum" type="Gauge"/>
+ <attrib name="disk.numberWriteAveraged.average" alias="DiskNrWeAdAvg" type="Gauge"/>
+ <attrib name="disk.numberWrite.summation" alias="DiskNrWeSum" type="Gauge"/>
+ <attrib name="disk.queueLatency.average" alias="DiskQeLyAvg" type="Gauge"/>
+ <attrib name="disk.queueReadLatency.average" alias="DiskQeRdLyAvg" type="Gauge"/>
+ <attrib name="disk.queueWriteLatency.average" alias="DiskQeWeLyAvg" type="Gauge"/>
+ <attrib name="disk.read.average" alias="DiskRdAvg" type="Gauge"/>
+ <attrib name="disk.totalLatency.average" alias="DiskTlLyAvg" type="Gauge"/>
+ <attrib name="disk.totalReadLatency.average" alias="DiskTlRdLyAvg" type="Gauge"/>
+ <attrib name="disk.totalWriteLatency.average" alias="DiskTlWeLyAvg" type="Gauge"/>
+ <attrib name="disk.write.average" alias="DiskWeAvg" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6vflashModule" resourceType="vmware6vflashModule">
+ <attrib name="vflashModule.numActiveVMDKs.latest" alias="vflModNumAeVMDKsLat" type="Gauge"/>
+ </vmware-group>
+ <vmware-group name="vmware6DaSt" resourceType="vmware6DaSt">
+ <attrib name="datastore.datastoreIops.average" alias="DaStDeIopsAvg" type="Gauge"/>
+ <attrib name="datastore.datastoreMaxQueueDepth.latest" alias="DaStDeMaxQeDhLat" type="Gauge"/>
+ <attrib name="datastore.datastoreNormalReadLatency.latest" alias="DaStDeNlRdLyLat" type="Gauge"/>
+ <attrib name="datastore.datastoreNormalWriteLatency.latest" alias="DaStDeNlWeLyLat" type="Gauge"/>
+ <attrib name="datastore.datastoreReadBytes.latest" alias="DaStDeRdBytesLat" type="Gauge"/>
+ <attrib name="datastore.datastoreReadIops.latest" alias="DaStDeRdIopsLat" type="Gauge"/>
+ <attrib name="datastore.datastoreReadLoadMetric.latest" alias="DaStDeRdLdMcLat" type="Gauge"/>
+ <attrib name="datastore.datastoreReadOIO.latest" alias="DaStDeRdOIOLat" type="Gauge"/>
+ <attrib name="datastore.datastoreVMObservedLatency.latest" alias="DaStDeVMOdLyLat" type="Gauge"/>
+ <attrib name="datastore.datastoreWriteBytes.latest" alias="DaStDeWeBytesLat" type="Gauge"/>
+ <attrib name="datastore.datastoreWriteIops.latest" alias="DaStDeWeIopsLat" type="Gauge"/>
+ <attrib name="datastore.datastoreWriteLoadMetric.latest" alias="DaStDeWeLdMcLat" type="Gauge"/>
+ <attrib name="datastore.datastoreWriteOIO.latest" alias="DaStDeWeOIOLat" type="Gauge"/>
+ <attrib name="datastore.numberReadAveraged.average" alias="DaStNrRdAdAvg" type="Gauge"/>
+ <attrib name="datastore.numberWriteAveraged.average" alias="DaStNrWeAdAvg" type="Gauge"/>
+ <attrib name="datastore.read.average" alias="DaStRdAvg" type="Gauge"/>
+ <attrib name="datastore.siocActiveTimePercentage.average" alias="DaStSiocAeTePeAvg" type="Gauge"/>
+ <attrib name="datastore.sizeNormalizedDatastoreLatency.average" alias="DaStSizeNdDeLyAvg" type="Gauge"/>
+ <attrib name="datastore.totalReadLatency.average" alias="DaStTlRdLyAvg" type="Gauge"/>
+ <attrib name="datastore.totalWriteLatency.average" alias="DaStTlWeLyAvg" type="Gauge"/>
+ <attrib name="datastore.write.average" alias="DaStWeAvg" type="Gauge"/>
+ </vmware-group>
+ </vmware-groups>
+ </vmware-collection>
</vmware-datacollection-config>
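Both new VMware 6 collections persist into the same five RRAs at a 300-second step. The retention each RRA implies is steps-per-row × rows × step; a quick sketch of the arithmetic in SQL (plain VALUES, no tables):

    -- Retention per RRA at step = 300 s
    select steps, n_rows,
           steps * n_rows * 300 / 86400.0 as days
    from (values (1, 2016), (12, 1488), (288, 366)) as rra(steps, n_rows);
    -- 7 days of 5-minute samples, 62 days of hourly and 366 days of daily
    -- rollups (the MAX and MIN RRAs share the daily resolution)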
diff --git a/xml-datacollection-config.xml b/xml-datacollection-config.xml
new file mode 100644
index 0000000..5780949
--- /dev/null
+++ b/xml-datacollection-config.xml
@@ -0,0 +1,19 @@
+<xml-datacollection-config rrdRepository="/opt/opennms/share/rrd/snmp/" xmlns="http://xmlns.opennms.org/xsd/config/xml-datacollection">
+ <!--
+ To understand how the XML Collector works, and how to configure it, please check the following link:
+ http://www.opennms.org/wiki/XML_Collector
+ -->
+
+ <xml-collection name="xml-elasticsearch-cluster-stats">
+ <rrd step="300">
+ <rra>RRA:AVERAGE:0.5:1:2016</rra>
+ <rra>RRA:AVERAGE:0.5:12:1488</rra>
+ <rra>RRA:AVERAGE:0.5:288:366</rra>
+ <rra>RRA:MAX:0.5:288:366</rra>
+ <rra>RRA:MIN:0.5:288:366</rra>
+ </rrd>
+ <xml-source url="http://{ipaddr}:9200/_cluster/stats">
+ <import-groups>xml-datacollection/elasticsearch-cluster-stats.xml</import-groups>
+ </xml-source>
+ </xml-collection>
+</xml-datacollection-config>
diff --git a/xml-datacollection/elasticsearch-cluster-stats.xml b/xml-datacollection/elasticsearch-cluster-stats.xml
new file mode 100644
index 0000000..6659e11
--- /dev/null
+++ b/xml-datacollection/elasticsearch-cluster-stats.xml
@@ -0,0 +1,156 @@
+<xml-groups>
+ <!--
+ Elasticsearch Performance metrics.
+
+ The idea of defining this in an external file is to share the content between multiple XML sources.
+
+ IMPORTANT: because of the current 19-character limit on data source names
+ inside RRDs/JRBs, the XML objects use an auto-generated ID to keep
+ each DS name within that length. The real variable name is displayed
+ as a comment after the XML object definition for information purposes.
+ Graph templates should refer to the performance metrics using the
+ XML Object Name (the auto-generated ID).
+ -->
+ <xml-group name="cluster" resource-type="node" resource-xpath="/">
+ <xml-object name="clusterName" type="STRING" xpath="cluster_name"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices" resource-type="node" resource-xpath="/indices">
+ <xml-object name="indicesCount" type="GAUGE" xpath="count"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-shards-index-shards" resource-type="node" resource-xpath="/indices/shards/index/shards">
+ <xml-object name="shardsMin" type="GAUGE" xpath="min"/>
+ <xml-object name="shardsMax" type="GAUGE" xpath="max"/>
+ <xml-object name="shardsAvg" type="GAUGE" xpath="avg"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-shards-index-primaries" resource-type="node" resource-xpath="/indices/shards/index/primaries">
+ <xml-object name="primariesMin" type="GAUGE" xpath="min"/>
+ <xml-object name="primariesMax" type="GAUGE" xpath="max"/>
+ <xml-object name="primariesAvg" type="GAUGE" xpath="avg"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-shards-index-replication" resource-type="node" resource-xpath="/indices/shards/index/replication">
+ <xml-object name="replicationMin" type="GAUGE" xpath="min"/>
+ <xml-object name="replicationMax" type="GAUGE" xpath="max"/>
+ <xml-object name="replicationAvg" type="GAUGE" xpath="avg"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-docs" resource-type="node" resource-xpath="/indices/docs">
+ <xml-object name="docsCount" type="COUNTER" xpath="count"/>
+ <xml-object name="docsDeleted" type="COUNTER" xpath="deleted"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-store" resource-type="node" resource-xpath="/indices/store">
+ <xml-object name="storeSizeBytes" type="GAUGE" xpath="size_in_bytes"/>
+ <xml-object name="throttleTimeMillis" type="GAUGE" xpath="throttle_time_in_millis"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-fielddata" resource-type="node" resource-xpath="/indices/fielddata">
+ <xml-object name="memorySizeBytes" type="GAUGE" xpath="memory_size_in_bytes"/>
+ <xml-object name="memoryEvictions" type="GAUGE" xpath="evictions"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-filtercache" resource-type="node" resource-xpath="/indices/filter_cache">
+ <xml-object name="fltrCacheBytes" type="GAUGE" xpath="memory_size_in_bytes"/>
+ <xml-object name="fltrCacheEvictions" type="GAUGE" xpath="evictions"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-idcache" resource-type="node" resource-xpath="/indices/id_cache">
+ <xml-object name="idCacheBytes" type="GAUGE" xpath="memory_size_in_bytes"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-completion" resource-type="node" resource-xpath="/indices/completion">
+ <xml-object name="completionSizeBytes" type="GAUGE" xpath="memory_size_in_bytes"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-segments" resource-type="node" resource-xpath="/indices/segments">
+ <xml-object name="segCount" type="GAUGE" xpath="count"/>
+ <xml-object name="segMemory" type="GAUGE" xpath="memory_in_bytes"/>
+ <xml-object name="segWriterMem" type="GAUGE" xpath="index_writer_memory_in_bytes"/>
+ <xml-object name="segWriterMemMax" type="GAUGE" xpath="index_writer_max_memory_in_bytes"/>
+ <xml-object name="segVerMapMem" type="GAUGE" xpath="version_map_memory_in_bytes"/>
+ <xml-object name="segFixBitSetMem" type="GAUGE" xpath="fixed_bit_set_memory_in_bytes"/>
+ </xml-group>
+
+ <xml-group name="cluster-indices-percolate" resource-type="node" resource-xpath="/indices/percolate">
+ <xml-object name="percTotal" type="GAUGE" xpath="total"/>
+ <xml-object name="percTimeInMillis" type="GAUGE" xpath="time_in_millis"/>
+ <xml-object name="percCurrent" type="GAUGE" xpath="current"/>
+ <xml-object name="percMemSizeBytes" type="GAUGE" xpath="memory_size_in_bytes"/>
+ <xml-object name="percQueries" type="GAUGE" xpath="queries"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-count" resource-type="node" resource-xpath="/nodes/count">
+ <xml-object name="nodesCntTotal" type="GAUGE" xpath="total"/>
+ <xml-object name="nodesCntMstOnly" type="GAUGE" xpath="master_only"/>
+ <xml-object name="nodesCntDataOnly" type="GAUGE" xpath="data_only"/>
+ <xml-object name="nodesCntMasterData" type="GAUGE" xpath="master_data"/>
+ <xml-object name="nodesClient" type="GAUGE" xpath="client"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-os" resource-type="node" resource-xpath="/nodes/os">
+ <xml-object name="osAvailProc" type="GAUGE" xpath="available_processors"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-os-mem" resource-type="node" resource-xpath="/nodes/os/mem">
+ <xml-object name="osMemTotalBytes" type="GAUGE" xpath="total_in_bytes"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-os-cpu" resource-type="node" resource-xpath="/nodes/os/cpu">
+ <xml-object name="cpuVendor" type="STRING" xpath="vendor"/>
+ <xml-object name="cpuModel" type="STRING" xpath="model"/>
+ <xml-object name="cpuMhz" type="STRING" xpath="mhz"/>
+ <xml-object name="cpuCacheSize" type="STRING" xpath="cache_size"/>
+ <xml-object name="cpuTotalCores" type="GAUGE" xpath="total_cores"/>
+ <xml-object name="cpuTotalSockets" type="GAUGE" xpath="total_sockets"/>
+ <xml-object name="cpuCoresPerSocket" type="GAUGE" xpath="cores_per_socket"/>
+ <xml-object name="cpuCacheSizeBytes" type="STRING" xpath="cache_size_in_bytes"/>
+ <xml-object name="cpuCount" type="GAUGE" xpath="count"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-process-cpu" resource-type="node" resource-xpath="/nodes/process/cpu">
+ <xml-object name="procCpuPercent" type="GAUGE" xpath="percent"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-process-open-file-descriptors" resource-type="node" resource-xpath="/nodes/process/open_file_descriptors">
+ <xml-object name="openFileDescMin" type="GAUGE" xpath="min"/>
+ <xml-object name="openFileDescMax" type="GAUGE" xpath="max"/>
+ <xml-object name="openFileDescAvg" type="GAUGE" xpath="avg"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-jvm" resource-type="node" resource-xpath="/nodes/jvm">
+ <xml-object name="jvmMaxUptimeMillis" type="GAUGE" xpath="max_uptime_in_millis"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-jvm-versions" resource-type="node" resource-xpath="/nodes/jvm/versions">
+ <xml-object name="jvmVersion" type="STRING" xpath="version"/>
+ <xml-object name="jvmVmName" type="STRING" xpath="vm_name"/>
+ <xml-object name="jvmVmVersion" type="STRING" xpath="vm_version"/>
+ <xml-object name="jvmVmVendor" type="STRING" xpath="vm_vendor"/>
+ <xml-object name="jvmCount" type="GAUGE" xpath="count"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-jvm-mem" resource-type="node" resource-xpath="/nodes/jvm/mem">
+ <xml-object name="jvmMemHeapBytes" type="GAUGE" xpath="heap_used_in_bytes"/>
+ <xml-object name="jvmMemMaxBytes" type="GAUGE" xpath="heap_max_in_bytes"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-jvm-threads" resource-type="node" resource-xpath="/nodes/jvm">
+ <xml-object name="jvmThreads" type="GAUGE" xpath="threads"/>
+ </xml-group>
+
+ <xml-group name="cluster-nodes-fs" resource-type="node" resource-xpath="/nodes/fs">
+ <xml-object name="fsTotalBytes" type="GAUGE" xpath="total_in_bytes"/>
+ <xml-object name="fsFreeBytes" type="GAUGE" xpath="free_in_bytes"/>
+ <xml-object name="fsAvailBytes" type="GAUGE" xpath="available_in_bytes"/>
+ <xml-object name="fsDskReads" type="GAUGE" xpath="disk_reads"/>
+ <xml-object name="fsDskWrites" type="GAUGE" xpath="disk_writes"/>
+ <xml-object name="fsDskIoOp" type="GAUGE" xpath="disk_io_op"/>
+ <xml-object name="fsDskReadSizeBytes" type="GAUGE" xpath="disk_read_size_in_bytes"/>
+ <xml-object name="fsDskWriteSizeBytes" type="GAUGE" xpath="disk_write_size_in_bytes"/>
+ <xml-object name="fsDskIoSizeBytes" type="GAUGE" xpath="disk_io_size_in_bytes"/>
+ <xml-object name="fsDskQueue" type="GAUGE" xpath="disk_queue"/>
+ <xml-object name="fsDskSvcTime" type="GAUGE" xpath="disk_service_time"/>
+ </xml-group>
+</xml-groups>
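The 19-character data-source limit called out in the header comment is why the aliases throughout this diff are abbreviated so aggressively; the longest names land exactly on the cap. A quick length check on a few names picked from the groups above:

    select name, length(name)
    from (values ('fsDskWriteSizeBytes'),   -- 19: at the cap
                 ('MemVmfsPbcWrkStMxLt'),   -- 19: from the VMware groups above
                 ('jvmMaxUptimeMillis'))    -- 18: one character to spare
         as t(name);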