
init

Signed-off-by: Jean-Michel Batto <jean-michel.batto@eldarsoft.com>
Jean-Michel Batto, 3 months ago
Parent
Commit
e38c81b22d
8 changed files with 2275 additions and 0 deletions
  1. Dockerfile (+188 -0)
  2. cours4-GLCS-JMB-20242025.pdf (BIN)
  3. docker-compose.yml (+76 -0)
  4. ssh/config (+1 -0)
  5. ssh/id_rsa.mpi (+27 -0)
  6. ssh/id_rsa.mpi.pub (+1 -0)
  7. supervisord.conf (+38 -0)
  8. telegraf.conf (+1944 -0)

+ 188 - 0
Dockerfile

@@ -0,0 +1,188 @@
+FROM debian:bullseye-slim
+# an image with OpenMPI 4.1, XMP and telegraf;
+# sshd runs without root privileges (as mpiuser)
+# ------------------------------------------------------------
+# Do basic install
+# ------------------------------------------------------------
+RUN apt-get update \
+    && mkdir -p /usr/share/man/man1 \
+    && apt-get install -y gcc ssh wget vim curl net-tools bison flex openjdk-11-jdk-headless autoconf make libtool m4 automake bzip2 libxml2 libxml2-dev gfortran g++ iputils-ping pkg-config colordiff nano git sudo lsof gawk emacs jq neofetch libltdl* astyle cmake gdb strace binutils-dev dnsutils netcat libgomp1 googletest supervisor \
+    && adduser --uid 1000 --home /home/mpiuser --shell /bin/bash \
+       --disabled-password --gecos '' mpiuser \
+    && passwd -d mpiuser \
+    && apt-get install -y openssh-server \
+    && mkdir -p /run/sshd /home/mpiuser/.ssh /home/mpiuser/.ssh-source \
+    && echo "StrictHostKeyChecking no" > /home/mpiuser/.ssh/config \
+    && chown -R mpiuser /home/mpiuser \
+    && sed -i s/#PermitRootLogin.*/PermitRootLogin\ no/ /etc/ssh/sshd_config \
+    && sed -i s/#PubkeyAuthentication.*/PubkeyAuthentication\ no/ /etc/ssh/sshd_config \
+    && sed -i s/.*UsePAM.*/UsePAM\ no/ /etc/ssh/sshd_config \
+    && sed -i s/#PasswordAuthentication.*/PasswordAuthentication\ yes/ /etc/ssh/sshd_config \
+    && sed -i s/#PermitEmptyPasswords.*/PermitEmptyPasswords\ yes/ /etc/ssh/sshd_config \
+    && sed -i s/#ChallengeResponse.*/ChallengeResponseAuthentication\ no/ /etc/ssh/sshd_config \
+    && sed -i s/#PermitUserEnvironment.*/PermitUserEnvironment\ yes/ /etc/ssh/sshd_config \
+	&& adduser mpiuser sudo
+
+ENV PREFIX=/usr/local \
+	OPENMPI_VERSION=4.1.4 \
+    LD_LIBRARY_PATH=/usr/local/lib \
+    DEBCONF_NOWARNINGS=yes
+
+# ------------------------------------------------------------
+# Install OpenMPI 4.1
+# https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.4.tar.gz
+# ------------------------------------------------------------
+
+# OpenMPI v4.1
+RUN repo="https://download.open-mpi.org/release/open-mpi/v4.1" \
+    && curl --location --silent --show-error --output openmpi.tar.gz \
+      "${repo}/openmpi-${OPENMPI_VERSION}.tar.gz" \
+    && tar xzf openmpi.tar.gz -C /tmp/ \
+    && cd /tmp/openmpi-${OPENMPI_VERSION} \
+    && ./configure --prefix=${PREFIX} CFLAGS="-O2 -std=gnu99 -fopenmp" \
+    && make \
+    && make install \
+    && ldconfig \
+    && cd / \
+    && rm -rf /tmp/openmpi-${OPENMPI_VERSION} /openmpi.tar.gz
+
+# ------------------------------------------------------------
+# Add some parameters for MPI, mpishare - a folder shared through the nodes
+# ------------------------------------------------------------	
+RUN mkdir -p /usr/local/var/mpishare
+
+RUN chown -R 1000:1000 /usr/local/var/mpishare
+
+RUN echo "mpiuser ALL=(ALL) NOPASSWD:ALL\n" >> /etc/sudoers
+
+RUN rm -fr /home/mpiuser/.openmpi && mkdir -p /home/mpiuser/.openmpi
+RUN cd /home/mpiuser/.openmpi \
+	&& printf '%s\n' "btl = tcp,self" \
+	"btl_tcp_if_include = eth0" \
+	"plm_rsh_no_tree_spawn = 1" >> default-mca-params.conf
+
+RUN chown -R 1000:1000 /home/mpiuser/.openmpi
+
+RUN echo "rmaps_base_oversubscribe = 1\n" >> /usr/local/etc/openmpi-mca-params.conf
+RUN echo "rmaps_base_inherit = 1\n" >> /usr/local/etc/openmpi-mca-params.conf
+
+
+# ------------------------------------------------------------
+# Start mpi python install / user mpiuser
+# ------------------------------------------------------------
+RUN apt-get install -y --no-install-recommends python3-dev python3-numpy python3-pip python3-virtualenv python3-scipy 2to3 \
+    && apt-get clean && apt-get purge && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+RUN pip install --upgrade pip
+
+# in order to have python related to mpiuser account
+USER mpiuser
+RUN  pip install --user -U setuptools \
+    && pip install --user mpi4py
+USER root
+
+
+# ------------------------------------------------------------
+# Copy MPI4PY example scripts
+# ------------------------------------------------------------
+
+
+
+ADD ./mpi4py_benchmarks /home/mpiuser/mpi4py_benchmarks
+RUN chown -R mpiuser:mpiuser /home/mpiuser/mpi4py_benchmarks
+RUN cd /home/mpiuser/mpi4py_benchmarks && 2to3 -w --no-diffs *.py
+
+
+
+# ------------------------------------------------------------
+# Load omni-compiler 1.3.4 from a public repo
+# ------------------------------------------------------------
+RUN mkdir -p /home/mpiuser/YMLEnvironment
+WORKDIR /home/mpiuser/YMLEnvironment
+RUN wget --no-check-certificate --content-disposition https://omni-compiler.org/download/stable/omnicompiler-1.3.4.tar.bz2
+RUN bunzip2 omnicompiler-1.3.4.tar.bz2 \
+	&& tar xvf omnicompiler-1.3.4.tar \
+	&& rm /home/mpiuser/YMLEnvironment/omnicompiler-1.3.4.tar
+	
+# ------------------------------------------------------------
+# Install omnicompiler-1.3.4 : requested javac
+# ------------------------------------------------------------
+
+WORKDIR /home/mpiuser/YMLEnvironment/omnicompiler-1.3.4
+RUN cd /home/mpiuser/YMLEnvironment/omnicompiler-1.3.4 \
+	&& export FCFLAGS="-w -fallow-argument-mismatch -O2" \
+	&& export FFLAGS="-w -fallow-argument-mismatch -O2" \
+	&& export MPI_FCFLAGS="-fopenmp -fallow-argument-mismatch -O2" \
+	&& export CPPFLAGS="-fallow-argument-mismatch -DOMNI_CPU_X86_64 -DOMNI_OS_LINUX -DGNU_SOURCE -D_REENTRANT" \
+	&& ./configure --prefix=${PREFIX} --with-libxml2=/usr \
+	&& make && make install && make clean && ldconfig
+
+# ------------------------------------------------------------
+# Install test-spawn-xmp
+# ------------------------------------------------------------
+USER mpiuser
+COPY test-spawn-xmp /home/mpiuser/YMLEnvironment/test-spawn-xmp
+USER root
+RUN chown -R mpiuser:mpiuser /home/mpiuser/YMLEnvironment
+
+# ------------------------------------------------------------
+# Install telegraf
+# ------------------------------------------------------------
+
+RUN apt-get update && apt-get install -y --no-install-recommends gpgv ca-certificates apt-transport-https gnupg lsb-release
+RUN wget -qO- https://repos.influxdata.com/influxdb.key | apt-key add -
+RUN echo "deb https://repos.influxdata.com/debian $(lsb_release -cs) stable"| tee /etc/apt/sources.list.d/influxdb.list
+RUN apt-get update && apt-get install -y --no-install-recommends telegraf && apt-get clean && apt-get purge && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+
+# ------------------------------------------------------------
+# The .ssh-source dir contains RSA keys - put in place with docker-compose
+# ------------------------------------------------------------
+
+
+RUN touch /home/mpiuser/.ssh-source/authorized_keys
+RUN touch /home/mpiuser/.ssh-source/id_rsa
+
+
+# ------------------------------------------------------------
+# Do SSHd parameter to enable mpiuser to run it
+# ------------------------------------------------------------
+RUN sed -i s/#UsePrivilegeSeparation.*/UsePrivilegeSeparation\ no/ /etc/ssh/sshd_config
+RUN mkdir -p /home/mpiuser/ssh
+RUN ssh-keygen -q -N "" -t dsa -f /home/mpiuser/ssh/ssh_host_dsa_key \
+	&& ssh-keygen -q -N "" -t rsa -b 4096 -f /home/mpiuser/ssh/ssh_host_rsa_key \
+	&& ssh-keygen -q -N "" -t ecdsa -f /home/mpiuser/ssh/ssh_host_ecdsa_key \
+	&& ssh-keygen -q -N "" -t ed25519 -f /home/mpiuser/ssh/ssh_host_ed25519_key
+
+RUN cp /etc/ssh/sshd_config /home/mpiuser/ssh/
+
+RUN sed -i s/#HostKey\ \\/etc\\/ssh/HostKey\ \\/home\\/mpiuser\\/ssh/ /home/mpiuser/ssh/sshd_config
+RUN sed -i s/#PidFile\ \\/var\\/run/PidFile\ \\/home\\/mpiuser\\/ssh/ /home/mpiuser/ssh/sshd_config
+RUN sed -i s/#LogLevel.*/LogLevel\ DEBUG3/ /home/mpiuser/ssh/sshd_config
+RUN sed -i s/PubkeyAuthentication\ no/PubkeyAuthentication\ yes/ /home/mpiuser/ssh/sshd_config
+
+RUN chown -R mpiuser:mpiuser /home/mpiuser/ssh
+
+# ------------------------------------------------------------
+# supervisord to run entrypoint
+# ------------------------------------------------------------
+COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+# ------------------------------------------------------------
+# telegraf to run entrypoint
+# ------------------------------------------------------------
+COPY telegraf.conf /etc/telegraf/telegraf.conf
+
+EXPOSE 22 80 8125
+
+WORKDIR /home/mpiuser
+
+CMD cp -R /home/mpiuser/.ssh-source/* /home/mpiuser/.ssh \
+	&& chmod 700 /home/mpiuser/.ssh/authorized_keys \
+	&& chmod 700 /home/mpiuser/.ssh/id_rsa \
+	&& chown -R mpiuser:mpiuser /home/mpiuser/.ssh \
+	&& exec /usr/bin/supervisord -c /etc/supervisor/conf.d/supervisord.conf
+#	&& exec /usr/sbin/sshd -D -f /home/mpiuser/ssh/sshd_config -E /tmp/sshd.log
+# do not detach (-D), log to stderr (-e)
+# CMD ["/usr/sbin/sshd", "-D", "-e"]
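
A minimal build sketch, assuming the two directories referenced by ADD/COPY above (./mpi4py_benchmarks and ./test-spawn-xmp) are present in the build context; the tag matches the image name used in docker-compose.yml below:

    # hypothetical build plus a quick smoke test of the OpenMPI install
    docker build -t jmbatto/m2chps-mpi41-xmp:latest .
    docker run --rm --user mpiuser jmbatto/m2chps-mpi41-xmp:latest mpirun --version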

BIN
cours4-GLCS-JMB-20242025.pdf


+ 76 - 0
docker-compose.yml

@@ -0,0 +1,76 @@
+# format 3.x, validated on W10 - there is no "mode/uid/gid" secret support in docker-compose 1.29/W10
+# you should provide your private/public RSA key pair in the ./ssh dir (named id_rsa.mpi / id_rsa.mpi.pub)
+# run "docker swarm init" first, then create the overlay network - yml_mpinet is its name:
+# docker network create --driver=overlay --attachable yml_mpinet
+# with xmp and telegraf
+# Dec 12, 2022 - Jean-Michel Batto
+version: "3.9"
+services:
+  grafana:
+    container_name: influxdb_local
+    image: philhawthorne/docker-influxdb-grafana:latest
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - usrlocalinfluxdb-foo:/var/lib/influxdb
+      - usrlocalgrafana-foo:/var/lib/grafana
+    ports: 
+      - "3003:3003"
+      - "3004:8083"
+      - "8086:8086"
+    networks:
+      - mpinet
+  mpihead:
+    image: jmbatto/m2chps-mpi41-xmp:latest
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - usrlocalvarmpi-foo:/usr/local/var/mpishare
+    shm_size: '512m'
+    ports: 
+      - "2022:22"
+    links: 
+      - mpinode
+    networks:
+      - mpinet
+    secrets:                   
+      - source: "id_rsa"
+        target: "/home/mpiuser/.ssh-source/id_rsa"
+      - source: "id_rsa_mpi_pub"
+        target: "/home/mpiuser/.ssh-source/id_rsa.pub"
+      - source: "authorized_keys"
+        target: "/home/mpiuser/.ssh-source/authorized_keys"
+    depends_on:
+      - grafana
+  mpinode:
+    image: jmbatto/m2chps-mpi41-xmp:latest
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - usrlocalvarmpi-foo:/usr/local/var/mpishare
+    shm_size: '512m'
+    networks:
+      - mpinet
+    secrets:                   
+      - source: "id_rsa"
+        target: "/home/mpiuser/.ssh-source/id_rsa"
+      - source: "id_rsa_mpi_pub"
+        target: "/home/mpiuser/.ssh-source/id_rsa.pub"
+      - source: "authorized_keys"
+        target: "/home/mpiuser/.ssh-source/authorized_keys"
+    depends_on:
+      - grafana
+secrets:
+  id_rsa_mpi_pub:
+    file: ssh/id_rsa.mpi.pub
+  id_rsa:
+    file: ssh/id_rsa.mpi
+  authorized_keys:
+    file: ssh/id_rsa.mpi.pub
+networks:
+  mpinet:
+    external: true
+    name: yml_mpinet
+    driver: overlay
+volumes:
+  usrlocalvarmpi-foo:
+  usrlocalgrafana-foo:
+  usrlocalinfluxdb-foo:
+    
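
A possible bring-up sequence, following the header comments above (command names assume docker-compose 1.29; oversubscription is enabled in the image, so a 2-rank run works even on one node):

    # one-time setup: swarm plus the attachable overlay network
    docker swarm init
    docker network create --driver=overlay --attachable yml_mpinet
    # start the stack, then smoke-test MPI from the head node
    docker-compose up -d
    docker-compose exec --user mpiuser mpihead mpirun -n 2 hostname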

+ 1 - 0
ssh/config

@@ -0,0 +1 @@
+StrictHostKeyChecking no

+ 27 - 0
ssh/id_rsa.mpi

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEA7PWLZmgdJ508dD15T6+xqGDvL9Ehzo9SgsnN6xJ+qpUvvOi4
+1axW0AqR4MnPTg/uuvk+x4tUpuufOW4w22UTGjsdvmIVWa9ujLtcRiN3YPY+SU+Y
+O5FfqKg7r/hBn+/GMcSoffwSs7vVgmhBBnp/mJh2O1cOAFZEe98/47mbg3/kHBAk
+36NOQktaU3l48B38EhBTnjWfcEGm1HcTRPFxXV5Wiko6ZhKFEuHcTVKng4ROtUqE
+mgHyI0aB7TAxg4na0ejItsYWEPWGeDOw6ms/4MwylxNosWzHFPW9p4zgLCLNr+b6
+bDDfYKjXZflAuTQtQhLmJUwD9uuYLAijpSE2fQIDAQABAoIBADgcgRET8Gt0CV/B
+OtvKz/f+VEVvcWD3gWNlJDTZIVOFllNWjIZUlA4ZoqenQkbK8Q4nfV1FOht4yjCQ
+TlN1oMtiWk297i5Zo4UBzPzy4w774I39oh/g8dT/WXr2/5s+7SDV38xNh6Q2A34o
+79T35wUcfUrZ93/O7dKjb/6d8hx2FMha0wVKqY4lmG1lQE3bbx3kakec0PdvU5kO
+YHKlpqj3pMR7CpMa+4yL/iXFwWYmnK+uu+zw7JR7PwvH1CzrnvW438wjQ1QmYbSx
+mHHOE89X67Lsl5hn81qYWBhpwAlBwi1qscsE0cV9GcFyKqWFqZsj5coM9u3CRfvy
+lrWe1OUCgYEA+LBUFEd3Hxs4sFiYElJ8R9SAs1udaqPvAl01hTEijJLfYlMMVs/y
+rgNN7j22zjDak2f8QdyMJZX7EZdRmdYcHO0csYOwbYvalzcnwk+U3mxmdD3r4xSo
+DSvkJ70fogAqUlcVIg2re6fCmZVJQTvMQYTVEM8zQomJRt/Lb2esSfsCgYEA8+zv
+44aToe8uqiDs4w8guRW7LCDkTw4z4IVo9JUibIaPjaAs5bZEBXSB43EEywXCR75H
+fML0rU1PVvKh1rqcvZdVzm+XMWVr3asPk0sapaiHaTcmyZvJRDxxqbLFp0zRP1T6
+cCtXNFdHWU4KiuKrUi6cDyOKchpfkSZa4seiT+cCgYB+n4FgBfdQPlMB70oW4irn
+g/q32CjxuGCk6oKqu5bkzo+xB6obtavSEFqouIGQwO056tNVUY+GP7Rjg5GH663K
+yKw4cl3tmS0Gm43B8TVSfw03mKO3rrfWZQe5eCFYIg9qd26KNT2gK435FzsCXQkm
+PxUhhu6JrW/ZR2/U3Iur6wKBgADrWLAb1ryagSuE+j+U1AO+kDkHWrTtkcZ72jxp
+v3p3O11GSEUJXdJDcSXhTCpTuDq6/dv7hB6PFwh126RKicKxKlKf2wsFndV1Cpb8
+hnovW2tLGOtTmfuW2rrQAKyzvmolsNfxYd/BoHQ2thV16z1hDZeFA8WQUeHjKh6G
+sBbrAoGATdtQlaUxx4izua6k02ihkxx/cRYwDl2N8UDvDBHokS7vJFMX8b8NpsGg
+zMElnqSpu/pe/0UG7N2MtPF6uyMcX8AZzzcsRkiMkDvWJzYt8Jpf+Eyd/uryF+Yv
+yrXaOEY83tm6x/fny5ZaZmk8lNth7bfWywuTMkZLX3fYpWtIeE4=
+-----END RSA PRIVATE KEY-----

+ 1 - 0
ssh/id_rsa.mpi.pub

@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDs9YtmaB0nnTx0PXlPr7GoYO8v0SHOj1KCyc3rEn6qlS+86LjVrFbQCpHgyc9OD+66+T7Hi1Sm6585bjDbZRMaOx2+YhVZr26Mu1xGI3dg9j5JT5g7kV+oqDuv+EGf78YxxKh9/BKzu9WCaEEGen+YmHY7Vw4AVkR73z/juZuDf+QcECTfo05CS1pTeXjwHfwSEFOeNZ9wQabUdxNE8XFdXlaKSjpmEoUS4dxNUqeDhE61SoSaAfIjRoHtMDGDidrR6Mi2xhYQ9YZ4M7Dqaz/gzDKXE2ixbMcU9b2njOAsIs2v5vpsMN9gqNdl+UC5NC1CEuYlTAP265gsCKOlITZ9 oweidner@peahi
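
Since this key pair is committed to the repository it should be treated as public; a safer option is to regenerate it locally under the file names the secrets section of docker-compose.yml expects:

    # writes ssh/id_rsa.mpi and ssh/id_rsa.mpi.pub
    ssh-keygen -t rsa -b 4096 -N "" -f ssh/id_rsa.mpi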

+ 38 - 0
supervisord.conf

@@ -0,0 +1,38 @@
+[supervisord]
+nodaemon=true
+user=root
+pidfile = /tmp/supervisord.pid
+logfile = /tmp/supervisord.log
+logfile_maxbytes = 10MB
+logfile_backups=10
+loglevel = debug
+
+
+[unix_http_server]
+file = /tmp/supervisor.sock
+[supervisorctl]
+serverurl = unix:///tmp/supervisor.sock
+ 
+[rpcinterface:supervisor]
+supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
+ 
+
+
+[program:sshd]
+user=mpiuser
+umask=022
+environment=HOME="/home/mpiuser",USER="mpiuser"
+stdout_logfile=/tmp/sshd.out
+stderr_logfile=/tmp/sshd.err
+stdout_logfile_maxbytes=10MB
+autostart=true
+autorestart=true
+stdout_logfile_backups=10
+command=/usr/sbin/sshd -D -f /home/mpiuser/ssh/sshd_config -E /tmp/sshd.log
+
+[program:telegraf]
+user=root
+stdout_logfile_maxbytes=10MB
+autostart=true
+autorestart=false
+stdout_logfile_backups=10
+command=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d
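
With the [unix_http_server] socket configured above, both programs can be checked from inside a running container; a sketch:

    # inside the container
    supervisorctl -s unix:///tmp/supervisor.sock status
    # sshd writes its DEBUG3 log to /tmp/sshd.log (the -E flag above)
    tail -f /tmp/sshd.log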

+ 1944 - 0
telegraf.conf

@@ -0,0 +1,1944 @@
+# Telegraf Configuration
+#
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+#
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+#
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+#
+# Environment variables can be used anywhere in this config file, simply prepend
+# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
+# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
+
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
+  # rack = "1a"
+  ## Environment variables can be used as tags, and throughout the config file
+  # user = "$USER"
+
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "20s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = false
+
+  ## Telegraf will send metrics to outputs in batches of at
+  ## most metric_batch_size metrics.
+  metric_batch_size = 1000
+  ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
+  ## output, and will flush this buffer on a successful write. Oldest metrics
+  ## are dropped first when this buffer fills.
+  metric_buffer_limit = 1000
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "2s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "5s"
+
+  ## By default, precision will be set to the same timestamp order as the
+  ## collection interval, with the maximum being 1s.
+  ## Precision will NOT be used for service inputs, such as logparser and statsd.
+  ## Valid values are "ns", "us" (or "µs"), "ms", "s".
+  precision = ""
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = true
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+  ## If set to true, do not set the "host" tag in the telegraf agent.
+  omit_hostname = false
+
+
+###############################################################################
+#                            OUTPUT PLUGINS                                   #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  ## The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  ## Multiple urls can be specified as part of the same cluster,
+  ## this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://influxdb_local:8089"] # UDP endpoint example
+  urls = ["http://influxdb_local:8086"] # required
+  ## The target database for metrics (telegraf will create it if not exists).
+  database = "telegraf" # required
+
+  ## Retention policy to write to. Empty string writes to the default rp.
+  retention_policy = ""
+  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
+  write_consistency = "any"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "35s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  ## Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+
+
+# # Configuration for Amon Server to send metrics to.
+# [[outputs.amon]]
+#   ## Amon Server Key
+#   server_key = "my-server-key" # required.
+#
+#   ## Amon Instance URL
+#   amon_instance = "https://youramoninstance" # required
+#
+#   ## Connection timeout.
+#   # timeout = "5s"
+
+
+# # Configuration for the AMQP server to send metrics to
+# [[outputs.amqp]]
+#   ## AMQP url
+#   url = "amqp://localhost:5672/influxdb"
+#   ## AMQP exchange
+#   exchange = "telegraf"
+#   ## Auth method. PLAIN and EXTERNAL are supported
+#   # auth_method = "PLAIN"
+#   ## Telegraf tag to use as a routing key
+#   ##  ie, if this tag exists, its value will be used as the routing key
+#   routing_tag = "host"
+#
+#   ## InfluxDB retention policy
+#   # retention_policy = "default"
+#   ## InfluxDB database
+#   # database = "telegraf"
+#   ## InfluxDB precision
+#   # precision = "s"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Data format to output.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
+# # Configuration for AWS CloudWatch output.
+# [[outputs.cloudwatch]]
+#   ## Amazon REGION
+#   region = 'us-east-1'
+#
+#   ## Amazon Credentials
+#   ## Credentials are loaded in the following order
+#   ## 1) Assumed credentials via STS if role_arn is specified
+#   ## 2) explicit credentials from 'access_key' and 'secret_key'
+#   ## 3) shared profile from 'profile'
+#   ## 4) environment variables
+#   ## 5) shared credentials file
+#   ## 6) EC2 Instance Profile
+#   #access_key = ""
+#   #secret_key = ""
+#   #token = ""
+#   #role_arn = ""
+#   #profile = ""
+#   #shared_credential_file = ""
+#
+#   ## Namespace for the CloudWatch MetricDatums
+#   namespace = 'InfluxData/Telegraf'
+
+
+# # Configuration for DataDog API to send metrics to.
+# [[outputs.datadog]]
+#   ## Datadog API key
+#   apikey = "my-secret-key" # required.
+#
+#   ## Connection timeout.
+#   # timeout = "5s"
+
+
+# # Send telegraf metrics to file(s)
+# [[outputs.file]]
+#   ## Files to write to, "stdout" is a specially handled file.
+#   files = ["stdout", "/tmp/metrics.out"]
+#
+#   ## Data format to output.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
+# # Configuration for Graphite server to send metrics to
+# [[outputs.graphite]]
+#   ## TCP endpoint for your graphite instance.
+#   ## If multiple endpoints are configured, output will be load balanced.
+#   ## Only one of the endpoints will be written to with each iteration.
+#   servers = ["localhost:2003"]
+#   ## Prefix metrics name
+#   prefix = ""
+#   ## Graphite output template
+#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   template = "host.tags.measurement.field"
+#   ## timeout in seconds for the write connection to graphite
+#   timeout = 2
+
+
+# # Send telegraf metrics to graylog(s)
+# [[outputs.graylog]]
+#   ## Udp endpoint for your graylog instance.
+#   servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
+
+
+# # Configuration for sending metrics to an Instrumental project
+# [[outputs.instrumental]]
+#   ## Project API Token (required)
+#   api_token = "API Token" # required
+#   ## Prefix the metrics with a given name
+#   prefix = ""
+#   ## Stats output template (Graphite formatting)
+#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
+#   template = "host.tags.measurement.field"
+#   ## Timeout in seconds to connect
+#   timeout = "2s"
+#   ## Display communication to Instrumental
+#   debug = false
+
+
+# # Configuration for the Kafka server to send metrics to
+# [[outputs.kafka]]
+#   ## URLs of kafka brokers
+#   brokers = ["localhost:9092"]
+#   ## Kafka topic for producer messages
+#   topic = "telegraf"
+#   ## Telegraf tag to use as a routing key
+#   ##  ie, if this tag exists, its value will be used as the routing key
+#   routing_tag = "host"
+#
+#   ## CompressionCodec represents the various compression codecs recognized by
+#   ## Kafka in messages.
+#   ##  0 : No compression
+#   ##  1 : Gzip compression
+#   ##  2 : Snappy compression
+#   compression_codec = 0
+#
+#   ##  RequiredAcks is used in Produce Requests to tell the broker how many
+#   ##  replica acknowledgements it must see before responding
+#   ##   0 : the producer never waits for an acknowledgement from the broker.
+#   ##       This option provides the lowest latency but the weakest durability
+#   ##       guarantees (some data will be lost when a server fails).
+#   ##   1 : the producer gets an acknowledgement after the leader replica has
+#   ##       received the data. This option provides better durability as the
+#   ##       client waits until the server acknowledges the request as successful
+#   ##       (only messages that were written to the now-dead leader but not yet
+#   ##       replicated will be lost).
+#   ##   -1: the producer gets an acknowledgement after all in-sync replicas have
+#   ##       received the data. This option provides the best durability, we
+#   ##       guarantee that no messages will be lost as long as at least one in
+#   ##       sync replica remains.
+#   required_acks = -1
+#
+#   ##  The total number of times to retry sending a message
+#   max_retry = 3
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Data format to output.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
+# # Configuration for the AWS Kinesis output.
+# [[outputs.kinesis]]
+#   ## Amazon REGION of kinesis endpoint.
+#   region = "ap-southeast-2"
+#
+#   ## Amazon Credentials
+#   ## Credentials are loaded in the following order
+#   ## 1) Assumed credentials via STS if role_arn is specified
+#   ## 2) explicit credentials from 'access_key' and 'secret_key'
+#   ## 3) shared profile from 'profile'
+#   ## 4) environment variables
+#   ## 5) shared credentials file
+#   ## 6) EC2 Instance Profile
+#   #access_key = ""
+#   #secret_key = ""
+#   #token = ""
+#   #role_arn = ""
+#   #profile = ""
+#   #shared_credential_file = ""
+#
+#   ## Kinesis StreamName must exist prior to starting telegraf.
+#   streamname = "StreamName"
+#   ## PartitionKey as used for sharding data.
+#   partitionkey = "PartitionKey"
+#   ## format of the Data payload in the kinesis PutRecord, supported
+#   ## String and Custom.
+#   format = "string"
+#   ## debug will show upstream aws messages.
+#   debug = false
+
+
+# # Configuration for Librato API to send metrics to.
+# [[outputs.librato]]
+#   ## Librato API Docs
+#   ## http://dev.librato.com/v1/metrics-authentication
+#   ## Librato API user
+#   api_user = "telegraf@influxdb.com" # required.
+#   ## Librato API token
+#   api_token = "my-secret-token" # required.
+#   ## Debug
+#   # debug = false
+#   ## Connection timeout.
+#   # timeout = "5s"
+#   ## Output source Template (same as graphite buckets)
+#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
+#   ## This template is used in librato's source (not metric's name)
+#   template = "host"
+#
+
+
+# # Configuration for MQTT server to send metrics to
+# [[outputs.mqtt]]
+#   servers = ["localhost:1883"] # required.
+#
+#   ## MQTT outputs send metrics to this topic format
+#   ##    "<topic_prefix>/<hostname>/<pluginname>/"
+#   ##   ex: prefix/web01.example.com/mem
+#   topic_prefix = "telegraf"
+#
+#   ## username and password to connect MQTT server.
+#   # username = "telegraf"
+#   # password = "metricsmetricsmetricsmetrics"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Data format to output.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
+# # Send telegraf measurements to NSQD
+# [[outputs.nsq]]
+#   ## Location of nsqd instance listening on TCP
+#   server = "localhost:4150"
+#   ## NSQ topic for producer messages
+#   topic = "telegraf"
+#
+#   ## Data format to output.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
+# # Configuration for OpenTSDB server to send metrics to
+# [[outputs.opentsdb]]
+#   ## prefix for metrics keys
+#   prefix = "my.specific.prefix."
+#
+#   ## Telnet Mode ##
+#   ## DNS name of the OpenTSDB server in telnet mode
+#   host = "opentsdb.example.com"
+#
+#   ## Port of the OpenTSDB server in telnet mode
+#   port = 4242
+#
+#   ## Debug true - Prints OpenTSDB communication
+#   debug = false
+
+
+# # Configuration for the Prometheus client to spawn
+# [[outputs.prometheus_client]]
+#   ## Address to listen on
+#   # listen = ":9126"
+
+
+# # Configuration for the Riemann server to send metrics to
+# [[outputs.riemann]]
+#   ## URL of server
+#   url = "localhost:5555"
+#   ## transport protocol to use either tcp or udp
+#   transport = "tcp"
+#   ## separator to use between input name and field name in Riemann service name
+#   separator = " "
+
+
+
+###############################################################################
+#                            INPUT PLUGINS                                    #
+###############################################################################
+
+# Read metrics about cpu usage
+[[inputs.cpu]]
+  ## Whether to report per-cpu stats or not
+  percpu = true
+  ## Whether to report total system cpu stats or not
+  totalcpu = true
+  ## Comment this line if you want the raw CPU time metrics
+  fielddrop = ["time_*"]
+
+
+# Read metrics about disk usage by mount point
+# [[inputs.disk]]
+  ## By default, telegraf gathers stats for all mountpoints.
+  ## Setting mountpoints will restrict the stats to the specified mountpoints.
+  # mount_points = ["/"]
+
+  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
+  ## present on /run, /var/run, /dev/shm or /dev).
+#  ignore_fs = ["tmpfs", "devtmpfs"]
+
+
+# Read metrics about disk IO by device
+# [[inputs.diskio]]
+  ## By default, telegraf will gather stats for all devices including
+  ## disk partitions.
+  ## Setting devices will restrict the stats to the specified devices.
+  # devices = ["sda", "sdb"]
+  ## Uncomment the following line if you need disk serial numbers.
+  # skip_serial_number = false
+
+
+# Get kernel statistics from /proc/stat
+[[inputs.kernel]]
+  # no configuration
+
+
+# Read metrics about memory usage
+[[inputs.mem]]
+  # no configuration
+
+
+# Get the number of processes and group them by status
+[[inputs.processes]]
+  # no configuration
+
+
+# Read metrics about swap memory usage
+# [[inputs.swap]]
+  # no configuration
+
+
+# Read metrics about system load & uptime
+[[inputs.system]]
+  # no configuration
+
+
+# # Read stats from aerospike server(s)
+# [[inputs.aerospike]]
+#   ## Aerospike servers to connect to (with port)
+#   ## This plugin will query all namespaces the aerospike
+#   ## server has configured and get stats for them.
+#   servers = ["localhost:3000"]
+
+
+# # Read Apache status information (mod_status)
+# [[inputs.apache]]
+#   ## An array of Apache status URI to gather stats.
+#   ## Default is "http://localhost/server-status?auto".
+#   urls = ["http://localhost/server-status?auto"]
+
+
+# # Read metrics of bcache from stats_total and dirty_data
+# [[inputs.bcache]]
+#   ## Bcache sets path
+#   ## If not specified, then default is:
+#   bcachePath = "/sys/fs/bcache"
+#
+#   ## By default, telegraf gathers stats for all bcache devices
+#   ## Setting devices will restrict the stats to the specified
+#   ## bcache devices.
+#   bcacheDevs = ["bcache0"]
+
+
+# # Read Cassandra metrics through Jolokia
+# [[inputs.cassandra]]
+#   # This is the context root used to compose the jolokia url
+#   context = "/jolokia/read"
+#   ## List of cassandra servers exposing jolokia read service
+#   servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
+#   ## List of metrics collected on above servers
+#   ## Each metric consists of a jmx path.
+#   ## This will collect all heap memory usage metrics from the jvm and
+#   ## ReadLatency metrics for all keyspaces and tables.
+#   ## "type=Table" in the query works with Cassandra3.0. Older versions might
+#   ## need to use "type=ColumnFamily"
+#   metrics  = [
+#     "/java.lang:type=Memory/HeapMemoryUsage",
+#     "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
+#   ]
+
+
+# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
+# [[inputs.ceph]]
+#   ## All configuration values are optional, defaults are shown below
+#
+#   ## location of ceph binary
+#   ceph_binary = "/usr/bin/ceph"
+#
+#   ## directory in which to look for socket files
+#   socket_dir = "/var/run/ceph"
+#
+#   ## prefix of MON and OSD socket files, used to determine socket type
+#   mon_prefix = "ceph-mon"
+#   osd_prefix = "ceph-osd"
+#
+#   ## suffix used to identify socket files
+#   socket_suffix = "asok"
+
+
+# # Read specific statistics per cgroup
+# [[inputs.cgroup]]
+#     ## Directories in which to look for files, globs are supported.
+# 	# paths = [
+# 	#   "/cgroup/memory",
+# 	#   "/cgroup/memory/child1",
+# 	#   "/cgroup/memory/child2/*",
+# 	# ]
+# 	## cgroup stat fields, as file names, globs are supported.
+# 	## these file names are appended to each path from above.
+# 	# files = ["memory.*usage*", "memory.limit_in_bytes"]
+
+
+# # Get standard chrony metrics, requires chronyc executable.
+# [[inputs.chrony]]
+#   ## If true, chronyc tries to perform a DNS lookup for the time server.
+#   # dns_lookup = false
+
+
+# # Pull Metric Statistics from Amazon CloudWatch
+# [[inputs.cloudwatch]]
+#   ## Amazon Region
+#   region = 'us-east-1'
+#
+#   ## Amazon Credentials
+#   ## Credentials are loaded in the following order
+#   ## 1) Assumed credentials via STS if role_arn is specified
+#   ## 2) explicit credentials from 'access_key' and 'secret_key'
+#   ## 3) shared profile from 'profile'
+#   ## 4) environment variables
+#   ## 5) shared credentials file
+#   ## 6) EC2 Instance Profile
+#   #access_key = ""
+#   #secret_key = ""
+#   #token = ""
+#   #role_arn = ""
+#   #profile = ""
+#   #shared_credential_file = ""
+#
+#   ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
+#   period = '1m'
+#
+#   ## Collection Delay (required - must account for metrics availability via CloudWatch API)
+#   delay = '1m'
+#
+#   ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
+#   ## gaps or overlap in pulled data
+#   interval = '1m'
+#
+#   ## Configure the TTL for the internal cache of metrics.
+#   ## Defaults to 1 hr if not specified
+#   #cache_ttl = '10m'
+#
+#   ## Metric Statistic Namespace (required)
+#   namespace = 'AWS/ELB'
+#
+#   ## Metrics to Pull (optional)
+#   ## Defaults to all Metrics in Namespace if nothing is provided
+#   ## Refreshes Namespace available metrics every 1h
+#   #[[inputs.cloudwatch.metrics]]
+#   #  names = ['Latency', 'RequestCount']
+#   #
+#   #  ## Dimension filters for Metric (optional)
+#   #  [[inputs.cloudwatch.metrics.dimensions]]
+#   #    name = 'LoadBalancerName'
+#   #    value = 'p-example'
+
+
+# # Collects conntrack stats from the configured directories and files.
+# [[inputs.conntrack]]
+#    ## The following defaults would work with multiple versions of conntrack.
+#    ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
+#    ## kernel versions, as are the directory locations.
+#
+#    ## Superset of filenames to look for within the conntrack dirs.
+#    ## Missing files will be ignored.
+#    files = ["ip_conntrack_count","ip_conntrack_max",
+#             "nf_conntrack_count","nf_conntrack_max"]
+#
+#    ## Directories to search within for the conntrack files above.
+#    ## Missing directories will be ignored.
+#    dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
+
+
+# # Gather health check statuses from services registered in Consul
+# [[inputs.consul]]
+#   ## Most of these values default to the ones configured at the Consul agent level.
+#   ## Optional Consul server address (default: "localhost")
+#   # address = "localhost"
+#   ## Optional URI scheme for the Consul server (default: "http")
+#   # scheme = "http"
+#   ## Optional ACL token used in every request (default: "")
+#   # token = ""
+#   ## Optional username used for request HTTP Basic Authentication (default: "")
+#   # username = ""
+#   ## Optional password used for HTTP Basic Authentication (default: "")
+#   # password = ""
+#   ## Optional data centre to query the health checks from (default: "")
+#   # datacentre = ""
+
+
+# # Read metrics from one or many couchbase clusters
+# [[inputs.couchbase]]
+#   ## specify servers via a url matching:
+#   ##  [protocol://][:password]@address[:port]
+#   ##  e.g.
+#   ##    http://couchbase-0.example.com/
+#   ##    http://admin:secret@couchbase-0.example.com:8091/
+#   ##
+#   ## If no servers are specified, then localhost is used as the host.
+#   ## If no protocol is specified, HTTP is used.
+#   ## If no port is specified, 8091 is used.
+#   servers = ["http://localhost:8091"]
+
+
+# # Read CouchDB Stats from one or more servers
+# [[inputs.couchdb]]
+#   ## Works with CouchDB stats endpoints out of the box
+#   ## Multiple HOSTs from which to read CouchDB stats:
+#   hosts = ["http://localhost:8086/_stats"]
+
+
+# # Read metrics from one or many disque servers
+# [[inputs.disque]]
+#   ## An array of URI to gather stats about. Specify an ip or hostname
+#   ## with optional port and password.
+#   ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
+#   ## If no servers are specified, then localhost is used as the host.
+#   servers = ["localhost"]
+
+
+# # Query given DNS server and gives statistics
+# [[inputs.dns_query]]
+#   ## servers to query
+#   servers = ["8.8.8.8"] # required
+#
+#   ## Domains or subdomains to query. "."(root) is default
+#   domains = ["."] # optional
+#
+#   ## Query record type. Default is "A"
+#   ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
+#   record_type = "A" # optional
+#
+#   ## Dns server port. 53 is default
+#   port = 53 # optional
+#
+#   ## Query timeout in seconds. Default is 2 seconds
+#   timeout = 2 # optional
+
+
+# # Read metrics about docker containers
+# [[inputs.docker]]
+#   ## Docker Endpoint
+#   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
+#   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
+#   endpoint = "unix:///var/run/docker.sock"
+#   ## Only collect metrics for these containers, collect all if empty
+#   container_names = []
+#   ## Timeout for docker list, info, and stats commands
+#   timeout = "5s"
+#
+#   ## Whether to report for each container per-device blkio (8:0, 8:1...) and
+#   ## network (eth0, eth1, ...) stats or not
+#   perdevice = true
+#   ## Whether to report for each container total blkio and network stats or not
+#   total = false
+#
+
+
+# # Read statistics from one or many dovecot servers
+# [[inputs.dovecot]]
+#   ## specify dovecot servers via an address:port list
+#   ##  e.g.
+#   ##    localhost:24242
+#   ##
+#   ## If no servers are specified, then localhost is used as the host.
+#   servers = ["localhost:24242"]
+#   ## Type is one of "user", "domain", "ip", or "global"
+#   type = "global"
+#   ## Wildcard matches like "*.com". An empty string "" is same as "*"
+#   ## If type = "ip" filters should be <IP/network>
+#   filters = [""]
+
+
+# # Read stats from one or more Elasticsearch servers or clusters
+# [[inputs.elasticsearch]]
+#   ## specify a list of one or more Elasticsearch servers
+#   servers = ["http://localhost:9200"]
+#
+#   ## set local to false when you want to read the indices stats from all nodes
+#   ## within the cluster
+#   local = true
+#
+#   ## set cluster_health to true when you want to also obtain cluster level stats
+#   cluster_health = false
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Read metrics from one or more commands that can output to stdout
+# [[inputs.exec]]
+#   ## Commands array
+#   commands = [
+#     "/tmp/test.sh",
+#     "/usr/bin/mycollector --foo=bar",
+#     "/tmp/collect_*.sh"
+#   ]
+#
+#   ## Timeout for each command to complete.
+#   timeout = "5s"
+#
+#   ## measurement name suffix (for separating different commands)
+#   name_suffix = "_mycollector"
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Read stats about given file(s)
+# [[inputs.filestat]]
+#   ## Files to gather stats about.
+#   ## These accept standard unix glob matching rules, but with the addition of
+#   ## ** as a "super asterisk". ie:
+#   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
+#   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+#   ##   "/var/log/apache.log" -> just tail the apache log file
+#   ##
+#   ## See https://github.com/gobwas/glob for more examples
+#   ##
+#   files = ["/var/log/**.log"]
+#   ## If true, read the entire file and calculate an md5 checksum.
+#   md5 = false
+
+
+# # Read flattened metrics from one or more GrayLog HTTP endpoints
+# [[inputs.graylog]]
+#   ## API endpoint, currently supported API:
+#   ##
+#   ##   - multiple  (Ex http://<host>:12900/system/metrics/multiple)
+#   ##   - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
+#   ##
+#   ## For namespace endpoint, the metrics array will be ignored for that call.
+#   ## Endpoint can contain namespace and multiple type calls.
+#   ##
+#   ## Please check http://[graylog-server-ip]:12900/api-browser for full list
+#   ## of endpoints
+#   servers = [
+#     "http://[graylog-server-ip]:12900/system/metrics/multiple",
+#   ]
+#
+#   ## Metrics list
+#   ## List of metrics can be found on Graylog webservice documentation.
+#   ## Or by hitting the web service API at:
+#   ##   http://[graylog-host]:12900/system/metrics
+#   metrics = [
+#     "jvm.cl.loaded",
+#     "jvm.memory.pools.Metaspace.committed"
+#   ]
+#
+#   ## Username and password
+#   username = ""
+#   password = ""
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Read metrics of haproxy, via socket or csv stats page
+# [[inputs.haproxy]]
+#   ## An array of addresses to gather stats about. Specify an ip or hostname
+#   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+#   ## Make sure you specify the complete path to the stats endpoint
+#   ## ie 10.10.3.33:1936/haproxy?stats
+#   #
+#   ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
+#   servers = ["http://myhaproxy.com:1936/haproxy?stats"]
+#   ## Or you can also use local socket
+#   ## servers = ["socket:/run/haproxy/admin.sock"]
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+#   ## By default, telegraf gathers temperature data from all disks detected
+#   ## by hddtemp.
+#   ##
+#   ## Only collect temps from the selected disks.
+#   ##
+#   ## A * as the device name will return the temperature values of all disks.
+#   ##
+#   # address = "127.0.0.1:7634"
+#   # devices = ["sda", "*"]
+
+
+# # HTTP/HTTPS request given an address a method and a timeout
+# [[inputs.http_response]]
+#   ## Server address (default http://localhost)
+#   address = "http://github.com"
+#   ## Set response_timeout (default 5 seconds)
+#   response_timeout = "5s"
+#   ## HTTP Request Method
+#   method = "GET"
+#   ## Whether to follow redirects from the server (defaults to false)
+#   follow_redirects = true
+#   ## HTTP Request Headers (all values must be strings)
+#   # [inputs.http_response.headers]
+#   #   Host = "github.com"
+#   ## Optional HTTP Request Body
+#   # body = '''
+#   # {'fake':'data'}
+#   # '''
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Read flattened metrics from one or more JSON HTTP endpoints
+# [[inputs.httpjson]]
+#   ## NOTE This plugin only reads numerical measurements, strings and booleans
+#   ## will be ignored.
+#
+#   ## a name for the service being polled
+#   name = "webserver_stats"
+#
+#   ## URL of each server in the service's cluster
+#   servers = [
+#     "http://localhost:9999/stats/",
+#     "http://localhost:9998/stats/",
+#   ]
+#
+#   ## HTTP method to use: GET or POST (case-sensitive)
+#   method = "GET"
+#
+#   ## List of tag names to extract from top-level of JSON server response
+#   # tag_keys = [
+#   #   "my_tag_1",
+#   #   "my_tag_2"
+#   # ]
+#
+#   ## HTTP parameters (all values must be strings)
+#   [inputs.httpjson.parameters]
+#     event_type = "cpu_spike"
+#     threshold = "0.75"
+#
+#   ## HTTP Header parameters (all values must be strings)
+#   # [inputs.httpjson.headers]
+#   #   X-Auth-Token = "my-xauth-token"
+#   #   apiVersion = "v1"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.influxdb]]
+#   ## Works with InfluxDB debug endpoints out of the box,
+#   ## but other services can use this format too.
+#   ## See the influxdb plugin's README for more details.
+#
+#   ## Multiple URLs from which to read InfluxDB-formatted JSON
+#   ## Default is "http://localhost:8086/debug/vars".
+#   urls = [
+#     "http://localhost:8086/debug/vars"
+#   ]
+
+
+# # Read metrics from one or many bare metal servers
+# [[inputs.ipmi_sensor]]
+#   ## specify servers via a url matching:
+#   ##  [username[:password]@][protocol[(address)]]
+#   ##  e.g.
+#   ##    root:passwd@lan(127.0.0.1)
+#   ##
+#   servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+
+
+# # Read JMX metrics through Jolokia
+# [[inputs.jolokia]]
+#   ## This is the context root used to compose the jolokia url
+#   context = "/jolokia"
+#
+#   ## This specifies the mode used
+#   # mode = "proxy"
+#   #
+#   ## When in proxy mode this section is used to specify further
+#   ## proxy address configurations.
+#   ## Remember to change host address to fit your environment.
+#   # [inputs.jolokia.proxy]
+#   #   host = "127.0.0.1"
+#   #   port = "8080"
+#
+#
+#   ## List of servers exposing jolokia read service
+#   [[inputs.jolokia.servers]]
+#     name = "as-server-01"
+#     host = "127.0.0.1"
+#     port = "8080"
+#     # username = "myuser"
+#     # password = "mypassword"
+#
+#   ## List of metrics collected on above servers
+#   ## Each metric consists of a name, a jmx path and either
+#   ## a pass or drop slice attribute.
+#   ## This collects all heap memory usage metrics.
+#   [[inputs.jolokia.metrics]]
+#     name = "heap_memory_usage"
+#     mbean  = "java.lang:type=Memory"
+#     attribute = "HeapMemoryUsage"
+#
+#   ## This collects thread count metrics.
+#   [[inputs.jolokia.metrics]]
+#     name = "thread_count"
+#     mbean  = "java.lang:type=Threading"
+#     attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
+#
+#   ## This collects loaded/unloaded class count metrics.
+#   [[inputs.jolokia.metrics]]
+#     name = "class_count"
+#     mbean  = "java.lang:type=ClassLoading"
+#     attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+
+
+# # Get kernel statistics from /proc/vmstat
+# [[inputs.kernel_vmstat]]
+#   # no configuration
+
+
+# # Read metrics from a LeoFS Server via SNMP
+# [[inputs.leofs]]
+#   ## An array of URI to gather stats about LeoFS.
+#   ## Specify an ip or hostname with port. ie 127.0.0.1:4020
+#   servers = ["127.0.0.1:4021"]
+
+
+# # Read metrics from local Lustre service on OST, MDS
+# [[inputs.lustre2]]
+#   ## An array of /proc globs to search for Lustre stats
+#   ## If not specified, the default will work on Lustre 2.5.x
+#   ##
+#   # ost_procfiles = [
+#   #   "/proc/fs/lustre/obdfilter/*/stats",
+#   #   "/proc/fs/lustre/osd-ldiskfs/*/stats",
+#   #   "/proc/fs/lustre/obdfilter/*/job_stats",
+#   # ]
+#   # mds_procfiles = [
+#   #   "/proc/fs/lustre/mdt/*/md_stats",
+#   #   "/proc/fs/lustre/mdt/*/job_stats",
+#   # ]
+
+
+# # Gathers metrics from the /3.0/reports MailChimp API
+# [[inputs.mailchimp]]
+#   ## MailChimp API key
+#   ## get from https://admin.mailchimp.com/account/api/
+#   api_key = "" # required
+#   ## Reports for campaigns sent more than days_old ago will not be collected.
+#   ## 0 means collect all.
+#   days_old = 0
+#   ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
+#   # campaign_id = ""
+
+
+# # Read metrics from one or many memcached servers
+# [[inputs.memcached]]
+#   ## An array of addresses to gather stats about. Specify an ip or hostname
+#   ## with optional port. ie localhost, 10.0.0.1:11211, etc.
+#   servers = ["localhost:11211"]
+#   # unix_sockets = ["/var/run/memcached.sock"]
+
+
+# # Telegraf plugin for gathering metrics from N Mesos masters
+# [[inputs.mesos]]
+#   ## Timeout, in ms.
+#   timeout = 100
+#   ## A list of Mesos masters.
+#   masters = ["localhost:5050"]
+#   ## Master metrics groups to be collected, by default, all enabled.
+#   master_collections = [
+#     "resources",
+#     "master",
+#     "system",
+#     "agents",
+#     "frameworks",
+#     "tasks",
+#     "messages",
+#     "evqueue",
+#     "registrar",
+#   ]
+#   ## A list of Mesos slaves, default is []
+#   # slaves = []
+#   ## Slave metrics groups to be collected, by default, all enabled.
+#   # slave_collections = [
+#   #   "resources",
+#   #   "agent",
+#   #   "system",
+#   #   "executors",
+#   #   "tasks",
+#   #   "messages",
+#   # ]
+#   ## Include mesos tasks statistics, default is false
+#   # slave_tasks = true
+
+
+# # Read metrics from one or many MongoDB servers
+# [[inputs.mongodb]]
+#   ## An array of URI to gather stats about. Specify an ip or hostname
+#   ## with optional port and password. ie,
+#   ##   mongodb://user:auth_key@10.10.3.30:27017,
+#   ##   mongodb://10.10.3.33:18832,
+#   ##   10.0.0.1:10000, etc.
+#   servers = ["127.0.0.1:27017"]
+#   gather_perdb_stats = false
+
+
+# # Read metrics from one or many mysql servers
+# [[inputs.mysql]]
+#   ## specify servers via a url matching:
+#   ##  [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
+#   ##  see https://github.com/go-sql-driver/mysql#dsn-data-source-name
+#   ##  e.g.
+#   ##    db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
+#   ##    db_user@tcp(127.0.0.1:3306)/?tls=false
+#   #
+#   ## If no servers are specified, then localhost is used as the host.
+#   servers = ["tcp(127.0.0.1:3306)/"]
+#   ## the limits for metrics from perf_events_statements
+#   perf_events_statements_digest_text_limit  = 120
+#   perf_events_statements_limit              = 250
+#   perf_events_statements_time_limit         = 86400
+#   #
+#   ## if the list is empty, then metrics are gathered from all database tables
+#   table_schema_databases                    = []
+#   #
+#   ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases listed above
+#   gather_table_schema                       = false
+#   #
+#   ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
+#   gather_process_list                       = true
+#   #
+#   ## gather auto_increment columns and max values from information schema
+#   gather_info_schema_auto_inc               = true
+#   #
+#   ## gather metrics from SHOW SLAVE STATUS command output
+#   gather_slave_status                       = true
+#   #
+#   ## gather metrics from SHOW BINARY LOGS command output
+#   gather_binary_logs                        = false
+#   #
+#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
+#   gather_table_io_waits                     = false
+#   #
+#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
+#   gather_table_lock_waits                   = false
+#   #
+#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
+#   gather_index_io_waits                     = false
+#   #
+#   ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
+#   gather_event_waits                        = false
+#   #
+#   ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
+#   gather_file_events_stats                  = false
+#   #
+#   ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
+#   gather_perf_events_statements             = false
+#   #
+#   ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
+#   interval_slow                   = "30m"
+
+
+# # Read metrics about network interface usage
+# [[inputs.net]]
+#   ## By default, telegraf gathers stats from any up interface (excluding loopback)
+#   ## Setting interfaces will tell it to gather these explicit interfaces,
+#   ## regardless of status.
+#   ##
+#   # interfaces = ["eth0"]
+
+
+# # TCP or UDP 'ping' given url and collect response time in seconds
+# [[inputs.net_response]]
+#   ## Protocol, must be "tcp" or "udp"
+#   protocol = "tcp"
+#   ## Server address (default localhost)
+#   address = "github.com:80"
+#   ## Set timeout
+#   timeout = "1s"
+#
+#   ## Optional string sent to the server
+#   # send = "ssh"
+#   ## Optional expected string in answer
+#   # expect = "ssh"
+#   ## Set read timeout (only used if expecting a response)
+#   read_timeout = "1s"
+
+
+# # Read TCP metrics such as established, time wait and sockets counts.
+# [[inputs.netstat]]
+#   # no configuration
+
+
+# # Read Nginx's basic status information (ngx_http_stub_status_module)
+# [[inputs.nginx]]
+#   ## An array of Nginx stub_status URI to gather stats.
+#   urls = ["http://localhost/status"]
+
+
+# # Read NSQ topic and channel statistics.
+# [[inputs.nsq]]
+#   ## An array of NSQD HTTP API endpoints
+#   endpoints = ["http://localhost:4151"]
+
+
+# # Collect kernel snmp counters and network interface statistics
+# [[inputs.nstat]]
+#   ## file paths for proc files. If empty default paths will be used:
+#   ##    /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
+#   ## These can also be overridden with env variables, see README.
+#   proc_net_netstat = "/proc/net/netstat"
+#   proc_net_snmp = "/proc/net/snmp"
+#   proc_net_snmp6 = "/proc/net/snmp6"
+#   ## dump metrics with 0 values too
+#   dump_zeros       = true
+
+
+# # Get standard NTP query metrics, requires ntpq executable.
+# [[inputs.ntpq]]
+#   ## If false, set the -n ntpq flag. Can reduce metric gather time.
+#   dns_lookup = true
+
+
+# # Read metrics of passenger using passenger-status
+# [[inputs.passenger]]
+#   ## Path of passenger-status.
+#   ##
+#   ## The plugin gathers metrics by parsing the XML output of passenger-status
+#   ## More information about the tool:
+#   ##   https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
+#   ##
+#   ## If no path is specified, then the plugin simply executes passenger-status,
+#   ## hoping it can be found in your PATH
+#   command = "passenger-status -v --show=xml"
+
+
+# # Read metrics of phpfpm, via HTTP status page or socket
+# [[inputs.phpfpm]]
+#   ## An array of addresses to gather stats about. Specify an ip or hostname
+#   ## with optional port and path
+#   ##
+#   ## Plugin can be configured in three modes (either can be used):
+#   ##   - http: the URL must start with http:// or https://, ie:
+#   ##       "http://localhost/status"
+#   ##       "http://192.168.130.1/status?full"
+#   ##
+#   ##   - unixsocket: path to fpm socket, ie:
+#   ##       "/var/run/php5-fpm.sock"
+#   ##      or using a custom fpm status path:
+#   ##       "/var/run/php5-fpm.sock:fpm-custom-status-path"
+#   ##
+#   ##   - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
+#   ##       "fcgi://10.0.0.12:9000/status"
+#   ##       "cgi://10.0.10.12:9001/status"
+#   ##
+#   ## Example of gathering from both a remote host and a local socket:
+#   ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
+#   urls = ["http://localhost/status"]
+
+
+# # Ping given url(s) and return statistics
+# [[inputs.ping]]
+#   ## NOTE: this plugin forks the ping command. You may need to set capabilities
+#   ## via setcap cap_net_raw+p /bin/ping
+#   #
+#   ## urls to ping
+#   urls = ["www.google.com"] # required
+#   ## number of pings to send per collection (ping -c <COUNT>)
+#   count = 1 # required
+#   ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
+#   ping_interval = 0.0
+#   ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
+#   timeout = 1.0
+#   ## interface to send ping from (ping -I <INTERFACE>)
+#   interface = ""
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql]]
+#   ## specify address via a url matching:
+#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
+#   ##       ?sslmode=[disable|verify-ca|verify-full]
+#   ## or a simple string:
+#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
+#   ##
+#   ## All connection parameters are optional.
+#   ##
+#   ## Without the dbname parameter, the driver will default to a database
+#   ## with the same name as the user. This dbname is just for instantiating a
+#   ## connection with the server and doesn't restrict the databases we are trying
+#   ## to grab metrics for.
+#   ##
+#   address = "host=localhost user=postgres sslmode=disable"
+#
+#   ## A list of databases to pull metrics about. If not specified, metrics for all
+#   ## databases are gathered.
+#   # databases = ["app_production", "testing"]
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql_extensible]]
+#   ## specify address via a url matching:
+#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
+#   ##       ?sslmode=[disable|verify-ca|verify-full]
+#   ## or a simple string:
+#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
+#   #
+#   ## All connection parameters are optional.
+#   ## Without the dbname parameter, the driver will default to a database
+#   ## with the same name as the user. This dbname is just for instantiating a
+#   ## connection with the server and doesn't restrict the databases we are trying
+#   ## to grab metrics for.
+#   #
+#   address = "host=localhost user=postgres sslmode=disable"
+#   ## A list of databases to pull metrics about. If not specified, metrics for all
+#   ## databases are gathered.
+#   ## databases = ["app_production", "testing"]
+#   #
+#   # outputaddress = "db01"
+#   ## A custom name for the database that will be used as the "server" tag in the
+#   ## measurement output. If not specified, a default one generated from
+#   ## the connection address is used.
+#   #
+#   ## Define the toml config where the sql queries are stored.
+#   ## New queries can be added. If withdbname is set to true and no databases
+#   ## are defined in the 'databases' field, the sql query is suffixed with
+#   ## 'is not null' so that it still succeeds.
+#   ## Example:
+#   ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
+#   ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
+#   ## when the databases variable is set to ['postgres', 'pgbench'] and
+#   ## withdbname is true. If withdbname is set to false, do not define the
+#   ## where clause (i.e. the dbname filter). The tagvalue field is used to
+#   ## define custom tags (separated by commas).
+#   ## The optional "measurement" value can be used to override the default
+#   ## output measurement name ("postgresql").
+#   #
+#   ## Structure :
+#   ## [[inputs.postgresql_extensible.query]]
+#   ##   sqlquery string
+#   ##   version string
+#   ##   withdbname boolean
+#   ##   tagvalue string (comma separated)
+#   ##   measurement string
+#   [[inputs.postgresql_extensible.query]]
+#     sqlquery="SELECT * FROM pg_stat_database"
+#     version=901
+#     withdbname=false
+#     tagvalue=""
+#     measurement=""
+#   [[inputs.postgresql_extensible.query]]
+#     sqlquery="SELECT * FROM pg_stat_bgwriter"
+#     version=901
+#     withdbname=false
+#     tagvalue="postgresql.stats"
+
+
+# # Read metrics from one or many PowerDNS servers
+# [[inputs.powerdns]]
+#   ## An array of sockets to gather stats about.
+#   ## Specify the path to a unix socket.
+#   unix_sockets = ["/var/run/pdns.controlsocket"]
+
+
+# # Monitor process cpu and memory usage
+# [[inputs.procstat]]
+#   ## Must specify one of: pid_file, exe, or pattern
+#   ## PID file to monitor process
+#   pid_file = "/var/run/nginx.pid"
+#   ## executable name (ie, pgrep <exe>)
+#   # exe = "nginx"
+#   ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
+#   # pattern = "nginx"
+#   ## user as argument for pgrep (ie, pgrep -u <user>)
+#   # user = "nginx"
+#
+#   ## override for process_name
+#   ## This is optional; default is sourced from /proc/<pid>/status
+#   # process_name = "bar"
+#   ## Field name prefix
+#   prefix = ""
+#   ## comment this out if you want raw cpu_time stats
+#   fielddrop = ["cpu_time_*"]
+
+
+# # Read metrics from one or many prometheus clients
+# [[inputs.prometheus]]
+#   ## An array of urls to scrape metrics from.
+#   urls = ["http://localhost:9100/metrics"]
+#
+#   ## Use bearer token for authorization
+#   # bearer_token = /path/to/bearer/token
+#
+#   ## Optional SSL Config
+#   # ssl_ca = /path/to/cafile
+#   # ssl_cert = /path/to/certfile
+#   # ssl_key = /path/to/keyfile
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Reads the last_run_summary.yaml file and converts it to measurements
+# [[inputs.puppetagent]]
+#   ## Location of puppet last run summary file
+#   location = "/var/lib/puppet/state/last_run_summary.yaml"
+
+
+# # Read metrics from one or many RabbitMQ servers via the management API
+# [[inputs.rabbitmq]]
+#   # url = "http://localhost:15672"
+#   # name = "rmq-server-1" # optional tag
+#   # username = "guest"
+#   # password = "guest"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## A list of nodes to pull metrics about. If not specified, metrics for
+#   ## all nodes are gathered.
+#   # nodes = ["rabbit@node1", "rabbit@node2"]
+
+
+# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
+# [[inputs.raindrops]]
+#   ## An array of raindrops middleware URIs to gather stats from.
+#   urls = ["http://localhost:8080/_raindrops"]
+
+
+# # Read metrics from one or many redis servers
+# [[inputs.redis]]
+#   ## specify servers via a url matching:
+#   ##  [protocol://][:password]@address[:port]
+#   ##  e.g.
+#   ##    tcp://localhost:6379
+#   ##    tcp://:password@192.168.99.100
+#   ##    unix:///var/run/redis.sock
+#   ##
+#   ## If no servers are specified, then localhost is used as the host.
+#   ## If no port is specified, 6379 is used
+#   servers = ["tcp://localhost:6379"]
+
+
+# # Read metrics from one or many RethinkDB servers
+# [[inputs.rethinkdb]]
+#   ## An array of URIs to gather stats about. Specify an ip or hostname
+#   ## with optional port and password. ie,
+#   ##   rethinkdb://user:auth_key@10.10.3.30:28105,
+#   ##   rethinkdb://10.10.3.33:18832,
+#   ##   10.0.0.1:10000, etc.
+#   servers = ["127.0.0.1:28015"]
+
+
+# # Read metrics from one or many Riak servers
+# [[inputs.riak]]
+#   # Specify a list of one or more riak http servers
+#   servers = ["http://localhost:8098"]
+
+
+# # Monitor sensors, requires lm-sensors package
+# [[inputs.sensors]]
+#   ## Remove numbers from field names.
+#   ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
+#   # remove_numbers = true
+
+
+# # Retrieves SNMP values from remote agents
+# [[inputs.snmp]]
+#   agents = [ "127.0.0.1:161" ]
+#   timeout = "5s"
+#   version = 2
+#
+#   # SNMPv1 & SNMPv2 parameters
+#   community = "public"
+#
+#   # SNMPv2 & SNMPv3 parameters
+#   max_repetitions = 50
+#
+#   # SNMPv3 parameters
+#   #sec_name = "myuser"
+#   #auth_protocol = "md5"         # Values: "MD5", "SHA", ""
+#   #auth_password = "password123"
+#   #sec_level = "authNoPriv"      # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
+#   #context_name = ""
+#   #priv_protocol = ""            # Values: "DES", "AES", ""
+#   #priv_password = ""
+#
+#   # measurement name
+#   name = "system"
+#   [[inputs.snmp.field]]
+#     name = "hostname"
+#     oid = ".1.0.0.1.1"
+#   [[inputs.snmp.field]]
+#     name = "uptime"
+#     oid = ".1.0.0.1.2"
+#   [[inputs.snmp.field]]
+#     name = "load"
+#     oid = ".1.0.0.1.3"
+#   [[inputs.snmp.field]]
+#     oid = "HOST-RESOURCES-MIB::hrMemorySize"
+#
+#   [[inputs.snmp.table]]
+#     # measurement name
+#     name = "remote_servers"
+#     inherit_tags = [ "hostname" ]
+#     [[inputs.snmp.table.field]]
+#       name = "server"
+#       oid = ".1.0.0.0.1.0"
+#       is_tag = true
+#     [[inputs.snmp.table.field]]
+#       name = "connections"
+#       oid = ".1.0.0.0.1.1"
+#     [[inputs.snmp.table.field]]
+#       name = "latency"
+#       oid = ".1.0.0.0.1.2"
+#
+#   [[inputs.snmp.table]]
+#     # auto populate table's fields using the MIB
+#     oid = "HOST-RESOURCES-MIB::hrNetworkTable"
+
+
+# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
+# [[inputs.snmp_legacy]]
+#   ## Use 'oids.txt' file to translate oids to names
+#   ## To generate 'oids.txt' you need to run:
+#   ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+#   ## Or if you have another MIB folder with custom MIBs
+#   ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+#   snmptranslate_file = "/tmp/oids.txt"
+#   [[inputs.snmp.host]]
+#     address = "192.168.2.2:161"
+#     # SNMP community
+#     community = "public" # default public
+#     # SNMP version (1, 2 or 3)
+#     # Version 3 not supported yet
+#     version = 2 # default 2
+#     # SNMP response timeout
+#     timeout = 2.0 # default 2.0
+#     # SNMP request retries
+#     retries = 2 # default 2
+#     # Which get/bulk do you want to collect for this host
+#     collect = ["mybulk", "sysservices", "sysdescr"]
+#     # Simple list of OIDs to get, in addition to "collect"
+#     get_oids = []
+#
+#   [[inputs.snmp.host]]
+#     address = "192.168.2.3:161"
+#     community = "public"
+#     version = 2
+#     timeout = 2.0
+#     retries = 2
+#     collect = ["mybulk"]
+#     get_oids = [
+#         "ifNumber",
+#         ".1.3.6.1.2.1.1.3.0",
+#     ]
+#
+#   [[inputs.snmp.get]]
+#     name = "ifnumber"
+#     oid = "ifNumber"
+#
+#   [[inputs.snmp.get]]
+#     name = "interface_speed"
+#     oid = "ifSpeed"
+#     instance = "0"
+#
+#   [[inputs.snmp.get]]
+#     name = "sysuptime"
+#     oid = ".1.3.6.1.2.1.1.3.0"
+#     unit = "second"
+#
+#   [[inputs.snmp.bulk]]
+#     name = "mybulk"
+#     max_repetition = 127
+#     oid = ".1.3.6.1.2.1.1"
+#
+#   [[inputs.snmp.bulk]]
+#     name = "ifoutoctets"
+#     max_repetition = 127
+#     oid = "ifOutOctets"
+#
+#   [[inputs.snmp.host]]
+#     address = "192.168.2.13:161"
+#     #address = "127.0.0.1:161"
+#     community = "public"
+#     version = 2
+#     timeout = 2.0
+#     retries = 2
+#     #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
+#     collect = ["sysuptime" ]
+#     [[inputs.snmp.host.table]]
+#       name = "iftable3"
+#       include_instances = ["enp5s0", "eth1"]
+#
+#   # SNMP TABLEs
+#   # table without mapping neither subtables
+#   [[inputs.snmp.table]]
+#     name = "iftable1"
+#     oid = ".1.3.6.1.2.1.31.1.1.1"
+#
+#   # table without mapping but with subtables
+#   [[inputs.snmp.table]]
+#     name = "iftable2"
+#     oid = ".1.3.6.1.2.1.31.1.1.1"
+#     sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
+#
+#   # table with mapping but without subtables
+#   [[inputs.snmp.table]]
+#     name = "iftable3"
+#     oid = ".1.3.6.1.2.1.31.1.1.1"
+#     # if empty, get all instances
+#     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+#     # if empty, get all subtables
+#
+#   # table with both mapping and subtables
+#   [[inputs.snmp.table]]
+#     name = "iftable4"
+#     oid = ".1.3.6.1.2.1.31.1.1.1"
+#     # if empty get all instances
+#     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+#     # if empty get all subtables
+#     # sub_tables need not be "real" subtables
+#     sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
+
+
+# # Read metrics from Microsoft SQL Server
+# [[inputs.sqlserver]]
+#   ## Specify instances to monitor with a list of connection strings.
+#   ## All connection parameters are optional.
+#   ## By default, the host is localhost, listening on default port, TCP 1433.
+#   ##   On Windows, the user is the currently running AD user (SSO).
+#   ##   See https://github.com/denisenkom/go-mssqldb for detailed connection
+#   ##   parameters.
+#   # servers = [
+#   #  "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
+#   # ]
+
+
+# # Sysstat metrics collector
+# [[inputs.sysstat]]
+#   ## Path to the sadc command.
+#   #
+#   ## Common Defaults:
+#   ##   Debian/Ubuntu: /usr/lib/sysstat/sadc
+#   ##   Arch:          /usr/lib/sa/sadc
+#   ##   RHEL/CentOS:   /usr/lib64/sa/sadc
+#   sadc_path = "/usr/lib/sa/sadc" # required
+#   #
+#   #
+#   ## Path to the sadf command, if it is not in PATH
+#   # sadf_path = "/usr/bin/sadf"
+#   #
+#   #
+#   ## Activities is a list of activities that are passed as arguments to the
+#   ## sadc collector utility (e.g. DISK, SNMP, etc.).
+#   ## The more activities that are added, the more data is collected.
+#   # activities = ["DISK"]
+#   #
+#   #
+#   ## Group metrics to measurements.
+#   ##
+#   ## If group is false, each metric will be prefixed with a description
+#   ## and forms a measurement of its own.
+#   ##
+#   ## If group is true, corresponding metrics are grouped into a single measurement.
+#   # group = true
+#   #
+#   #
+#   ## Options for the sadf command. The values on the left represent the sadf
+#   ## options and the values on the right are their descriptions (which are
+#   ## used for grouping and prefixing metrics).
+#   ##
+#   ## Run 'sar -h' or 'man sar' to find out the supported options for your
+#   ## sysstat version.
+#   [inputs.sysstat.options]
+#     -C = "cpu"
+#     -B = "paging"
+#     -b = "io"
+#     -d = "disk"             # requires DISK activity
+#     "-n ALL" = "network"
+#     "-P ALL" = "per_cpu"
+#     -q = "queue"
+#     -R = "mem"
+#     -r = "mem_util"
+#     -S = "swap_util"
+#     -u = "cpu_util"
+#     -v = "inode"
+#     -W = "swap"
+#     -w = "task"
+#   #  -H = "hugepages"        # only available for newer linux distributions
+#   #  "-I ALL" = "interrupts" # requires INT activity
+#   #
+#   #
+#   ## Device tags can be used to add additional tags for devices.
+#   ## For example the configuration below adds a tag vg with value rootvg for
+#   ## all metrics with sda devices.
+#   # [[inputs.sysstat.device_tags.sda]]
+#   #  vg = "rootvg"
+
+
+# # Inserts sine and cosine waves for demonstration purposes
+# [[inputs.trig]]
+#   ## Set the amplitude
+#   amplitude = 10.0
+
+
+# # Read Twemproxy stats data
+# [[inputs.twemproxy]]
+#   ## Twemproxy stats address and port (no scheme)
+#   addr = "localhost:22222"
+#   ## Monitor pool name
+#   pools = ["redis_pool", "mc_pool"]
+
+
+# # A plugin to collect stats from Varnish HTTP Cache
+# [[inputs.varnish]]
+#   ## The default location of the varnishstat binary can be overridden with:
+#   binary = "/usr/bin/varnishstat"
+#
+#   ## By default, telegraf gathers stats for 3 metric points.
+#   ## Setting stats will override the defaults shown below.
+#   ## Glob matching can be used, ie, stats = ["MAIN.*"]
+#   ## stats may also be set to ["*"], which will collect all stats
+#   stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
+
+
+# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
+# [[inputs.zfs]]
+#   ## ZFS kstat path. Ignored on FreeBSD
+#   ## If not specified, then default is:
+#   # kstatPath = "/proc/spl/kstat/zfs"
+#
+#   ## By default, telegraf gathers all zfs stats
+#   ## If not specified, then default is:
+#   # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
+#
+#   ## By default, don't gather zpool stats
+#   # poolMetrics = false
+
+
+# # Reads 'mntr' stats from one or many zookeeper servers
+# [[inputs.zookeeper]]
+#   ## An array of addresses to gather stats about. Specify an ip or hostname
+#   ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
+#
+#   ## If no servers are specified, then localhost is used as the host.
+#   ## If no port is specified, 2181 is used
+#   servers = [":2181"]
+
+
+
+###############################################################################
+#                            SERVICE INPUT PLUGINS                            #
+###############################################################################
+
+# # Read metrics from Kafka topic(s)
+# [[inputs.kafka_consumer]]
+#   ## topic(s) to consume
+#   topics = ["telegraf"]
+#   ## an array of Zookeeper connection strings
+#   zookeeper_peers = ["localhost:2181"]
+#   ## Zookeeper Chroot
+#   zookeeper_chroot = ""
+#   ## the name of the consumer group
+#   consumer_group = "telegraf_metrics_consumers"
+#   ## Offset (must be either "oldest" or "newest")
+#   offset = "oldest"
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Stream and parse log file(s).
+# [[inputs.logparser]]
+#   ## Log files to parse.
+#   ## These accept standard unix glob matching rules, but with the addition of
+#   ## ** as a "super asterisk". ie:
+#   ##   /var/log/**.log     -> recursively find all .log files in /var/log
+#   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
+#   ##   /var/log/apache.log -> only tail the apache log file
+#   files = ["/var/log/apache/access.log"]
+#   ## Read file from beginning.
+#   from_beginning = false
+#
+#   ## Parse logstash-style "grok" patterns:
+#   ##   Telegraf built-in parsing patterns: https://goo.gl/dkay10
+#   [inputs.logparser.grok]
+#     ## This is a list of patterns to check the given log file(s) for.
+#     ## Note that adding patterns here increases processing time. The most
+#     ## efficient configuration is to have one pattern per logparser.
+#     ## Other common built-in patterns are:
+#     ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
+#     ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+#     patterns = ["%{COMBINED_LOG_FORMAT}"]
+#     ## Name of the output measurement.
+#     measurement = "apache_access_log"
+#     ## Full path(s) to custom pattern files.
+#     custom_pattern_files = []
+#     ## Custom patterns can also be defined here. Put one pattern per line.
+#     custom_patterns = '''
+#     '''
+
+
+# # Read metrics from MQTT topic(s)
+# [[inputs.mqtt_consumer]]
+#   servers = ["localhost:1883"]
+#   ## MQTT QoS, must be 0, 1, or 2
+#   qos = 0
+#
+#   ## Topics to subscribe to
+#   topics = [
+#     "telegraf/host01/cpu",
+#     "telegraf/+/mem",
+#     "sensors/#",
+#   ]
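+#   ## ('+' matches exactly one topic level; '#' matches the whole remaining
+#   ## subtree, so "sensors/#" covers every topic under sensors/.)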
+#
+#   # if true, messages that can't be delivered while the subscriber is offline
+#   # will be delivered when it comes back (such as on service restart).
+#   # NOTE: if true, client_id MUST be set
+#   persistent_session = false
+#   # If empty, a random client ID will be generated.
+#   client_id = ""
+#
+#   ## username and password to connect to the MQTT server.
+#   # username = "telegraf"
+#   # password = "metricsmetricsmetricsmetrics"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Read metrics from NATS subject(s)
+# [[inputs.nats_consumer]]
+#   ## urls of NATS servers
+#   servers = ["nats://localhost:4222"]
+#   ## Use Transport Layer Security
+#   secure = false
+#   ## subject(s) to consume
+#   subjects = ["telegraf"]
+#   ## name a queue group
+#   queue_group = "telegraf_consumers"
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Read NSQ topic for metrics.
+# [[inputs.nsq_consumer]]
+#   ## A string representing the NSQD TCP endpoint
+#   server = "localhost:4150"
+#   topic = "telegraf"
+#   channel = "consumer"
+#   max_in_flight = 100
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Statsd Server
+[[inputs.statsd]]
+#   ## Address and port to host UDP listener on
+  service_address = ":8125"
+#   ## Delete gauges every interval (default=false)
+#   delete_gauges = false
+#   ## Delete counters every interval (default=false)
+#   delete_counters = false
+#   ## Delete sets every interval (default=false)
+#   delete_sets = false
+#   ## Delete timings & histograms every interval (default=true)
+#   delete_timings = true
+#   ## Percentiles to calculate for timing & histogram stats
+#   percentiles = [90]
+#
+#   ## separator to use between elements of a statsd metric
+#   metric_separator = "_"
+#
+#   ## Parses tags in the datadog statsd format
+#   ## http://docs.datadoghq.com/guides/dogstatsd/
+#   parse_data_dog_tags = false
+#
+#   ## Statsd data translation templates, more info can be read here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
+  templates = [
+      "cpu.* measurement*"
+  ]
+#
+#   ## Number of UDP messages allowed to queue up; once filled,
+#   ## the statsd server will start dropping packets.
+  allowed_pending_messages = 100
+#
+#   ## Number of timing/histogram values to track per-measurement in the
+#   ## calculation of percentiles. Raising this limit increases the accuracy
+#   ## of percentiles but also increases the memory usage and cpu time.
+#   percentile_limit = 1000
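+#
+#   ## With the template above, a graphite-style bucket such as
+#   ## "cpu.usage.idle" is folded into the measurement "cpu_usage_idle"
+#   ## (name parts joined with metric_separator). As a quick smoke test,
+#   ## assuming nc is available, a gauge can be sent to the listener with:
+#   ##   echo "cpu.usage.idle:42|g" | nc -u -w1 127.0.0.1 8125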
+
+
+# # Stream a log file, like the tail -f command
+# [[inputs.tail]]
+#   ## files to tail.
+#   ## These accept standard unix glob matching rules, but with the addition of
+#   ## ** as a "super asterisk". ie:
+#   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
+#   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+#   ##   "/var/log/apache.log" -> just tail the apache log file
+#   ##
+#   ## See https://github.com/gobwas/glob for more examples
+#   ##
+#   files = ["/var/mymetrics.out"]
+#   ## Read file from beginning.
+#   from_beginning = false
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
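+#
+#   ## For reference, a metric in the influx data format (line protocol)
+#   ## looks like:
+#   ##   mymeasurement,host=node1 value=42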
+
+
+# # Generic TCP listener
+# [[inputs.tcp_listener]]
+#   ## Address and port to host TCP listener on
+#   service_address = ":8094"
+#
+#   ## Number of TCP messages allowed to queue up. Once filled, the
+#   ## TCP listener will start dropping packets.
+#   allowed_pending_messages = 10000
+#
+#   ## Maximum number of concurrent TCP connections to allow
+#   max_tcp_connections = 250
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Generic UDP listener
+# [[inputs.udp_listener]]
+#   ## Address and port to host UDP listener on
+#   service_address = ":8092"
+#
+#   ## Number of UDP messages allowed to queue up. Once filled, the
+#   ## UDP listener will start dropping packets.
+#   allowed_pending_messages = 10000
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # A Webhooks Event collector
+# [[inputs.webhooks]]
+#   ## Address and port to host Webhook listener on
+#   service_address = ":1619"
+#
+#   [inputs.webhooks.github]
+#     path = "/github"
+#
+#   [inputs.webhooks.mandrill]
+#     path = "/mandrill"
+#
+#   [inputs.webhooks.rollbar]
+#     path = "/rollbar"