# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
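#
# For example (a hedged illustration; STR_VAR and INT_VAR are hypothetical
# variables assumed to be exported in telegraf's environment):
#   hostname = "$STR_VAR"        # string value, must be quoted
#   metric_batch_size = $INT_VAR # numeric value, unquoted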

# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"

# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "20s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = false
## Telegraf will send metrics to outputs in batches of at
## most metric_batch_size metrics.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
metric_buffer_limit = 1000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "2s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "5s"
## By default, precision will be set to the same timestamp order as the
## collection interval, with the maximum being 1s.
## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = true
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false

###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
## The full HTTP or UDP endpoint URL for your InfluxDB instance.
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
# urls = ["udp://influxdb_local:8089"] # UDP endpoint example
urls = ["http://influxdb_local:8086"] # required
## The target database for metrics (telegraf will create it if it does not exist).
database = "telegraf" # required
## Retention policy to write to. Empty string writes to the default rp.
retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
write_consistency = "any"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "35s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
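## Example (a hedged sketch; hostnames and the INFLUX_PASSWORD variable are
## hypothetical): writing to one of two cluster members per flush over HTTPS.
# urls = ["https://influxdb-a.example.com:8086", "https://influxdb-b.example.com:8086"]
# username = "telegraf"
# password = "$INFLUX_PASSWORD"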

# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
# ## Amon Server Key
# server_key = "my-server-key" # required.
#
# ## Amon Instance URL
# amon_instance = "https://youramoninstance" # required
#
# ## Connection timeout.
# # timeout = "5s"

# # Configuration for the AMQP server to send metrics to
# [[outputs.amqp]]
# ## AMQP url
# url = "amqp://localhost:5672/influxdb"
# ## AMQP exchange
# exchange = "telegraf"
# ## Auth method. PLAIN and EXTERNAL are supported
# # auth_method = "PLAIN"
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
# ## InfluxDB retention policy
# # retention_policy = "default"
# ## InfluxDB database
# # database = "telegraf"
# ## InfluxDB precision
# # precision = "s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"

# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
# ## Amazon REGION
# region = 'us-east-1'
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Namespace for the CloudWatch MetricDatums
# namespace = 'InfluxData/Telegraf'

# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
# ## Datadog API key
# apikey = "my-secret-key" # required.
#
# ## Connection timeout.
# # timeout = "5s"

# # Send telegraf metrics to file(s)
# [[outputs.file]]
# ## Files to write to, "stdout" is a specially handled file.
# files = ["stdout", "/tmp/metrics.out"]
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"

# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
# ## TCP endpoint for your graphite instance.
# ## If multiple endpoints are configured, output will be load balanced.
# ## Only one of the endpoints will be written to with each iteration.
# servers = ["localhost:2003"]
# ## Prefix metrics name
# prefix = ""
# ## Graphite output template
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# template = "host.tags.measurement.field"
# ## timeout in seconds for the write connection to graphite
# timeout = 2

# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
# ## Udp endpoint for your graylog instance.
# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]

# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
# ## Project API Token (required)
# api_token = "API Token" # required
# ## Prefix the metrics with a given name
# prefix = ""
# ## Stats output template (Graphite formatting)
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
# template = "host.tags.measurement.field"
# ## Timeout in seconds to connect
# timeout = "2s"
# ## Display Communication to Instrumental
# debug = false

# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
# ## URLs of kafka brokers
# brokers = ["localhost:9092"]
# ## Kafka topic for producer messages
# topic = "telegraf"
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
# ## CompressionCodec represents the various compression codecs recognized by
# ## Kafka in messages.
# ## 0 : No compression
# ## 1 : Gzip compression
# ## 2 : Snappy compression
# compression_codec = 0
#
# ## RequiredAcks is used in Produce Requests to tell the broker how many
# ## replica acknowledgements it must see before responding
# ## 0 : the producer never waits for an acknowledgement from the broker.
# ## This option provides the lowest latency but the weakest durability
# ## guarantees (some data will be lost when a server fails).
# ## 1 : the producer gets an acknowledgement after the leader replica has
# ## received the data. This option provides better durability as the
# ## client waits until the server acknowledges the request as successful
# ## (only messages that were written to the now-dead leader but not yet
# ## replicated will be lost).
# ## -1: the producer gets an acknowledgement after all in-sync replicas have
# ## received the data. This option provides the best durability, we
# ## guarantee that no messages will be lost as long as at least one in
# ## sync replica remains.
# required_acks = -1
#
# ## The total number of times to retry sending a message
# max_retry = 3
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"

# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
# ## Amazon REGION of kinesis endpoint.
# region = "ap-southeast-2"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Kinesis StreamName must exist prior to starting telegraf.
# streamname = "StreamName"
# ## PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
# ## format of the Data payload in the kinesis PutRecord, supported
# ## String and Custom.
# format = "string"
# ## debug will show upstream aws messages.
# debug = false

# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
# ## Librato API Docs
# ## http://dev.librato.com/v1/metrics-authentication
# ## Librato API user
# api_user = "telegraf@influxdb.com" # required.
# ## Librato API token
# api_token = "my-secret-token" # required.
# ## Debug
# # debug = false
# ## Connection timeout.
# # timeout = "5s"
# ## Output source Template (same as graphite buckets)
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
# ## This template is used in librato's source (not metric's name)
# template = "host"

# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
# servers = ["localhost:1883"] # required.
#
# ## MQTT outputs send metrics to this topic format
# ## "<topic_prefix>/<hostname>/<pluginname>/"
# ## ex: prefix/web01.example.com/mem
# topic_prefix = "telegraf"
#
# ## username and password to connect MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"

# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
# ## Location of nsqd instance listening on TCP
# server = "localhost:4150"
# ## NSQ topic for producer messages
# topic = "telegraf"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"

# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
# ## prefix for metrics keys
# prefix = "my.specific.prefix."
#
# ## Telnet Mode ##
# ## DNS name of the OpenTSDB server in telnet mode
# host = "opentsdb.example.com"
#
# ## Port of the OpenTSDB server in telnet mode
# port = 4242
#
# ## Debug true - Prints OpenTSDB communication
# debug = false

# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
# ## Address to listen on
# # listen = ":9126"

# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
# ## URL of server
# url = "localhost:5555"
# ## transport protocol to use either tcp or udp
# transport = "tcp"
# ## separator to use between input name and field name in Riemann service name
# separator = " "

###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################

# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## Comment this line if you want the raw CPU time metrics
fielddrop = ["time_*"]
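## (e.g. the glob above drops raw fields such as time_user and time_idle;
## field names assumed from the cpu plugin's output)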

# Read metrics about disk usage by mount point
# [[inputs.disk]]
## By default, telegraf gathers stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
# mount_points = ["/"]
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
## present on /run, /var/run, /dev/shm or /dev).
# ignore_fs = ["tmpfs", "devtmpfs"]

# Read metrics about disk IO by device
# [[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false

# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration

# Read metrics about memory usage
[[inputs.mem]]
# no configuration

# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration

# Read metrics about swap memory usage
# [[inputs.swap]]
# no configuration

# Read metrics about system load & uptime
[[inputs.system]]
# no configuration

# # Read stats from aerospike server(s)
# [[inputs.aerospike]]
# ## Aerospike servers to connect to (with port)
# ## This plugin will query all namespaces the aerospike
# ## server has configured and get stats for them.
# servers = ["localhost:3000"]

# # Read Apache status information (mod_status)
# [[inputs.apache]]
# ## An array of Apache status URI to gather stats.
# ## Default is "http://localhost/server-status?auto".
# urls = ["http://localhost/server-status?auto"]

# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
# ## Bcache sets path
# ## If not specified, then default is:
# bcachePath = "/sys/fs/bcache"
#
# ## By default, telegraf gathers stats for all bcache devices
# ## Setting devices will restrict the stats to the specified
# ## bcache devices.
# bcacheDevs = ["bcache0"]

# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
# # This is the context root used to compose the jolokia url
# context = "/jolokia/read"
# ## List of cassandra servers exposing jolokia read service
# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
# ## List of metrics collected on above servers
# ## Each metric consists of a jmx path.
# ## This will collect all heap memory usage metrics from the jvm and
# ## ReadLatency metrics for all keyspaces and tables.
# ## "type=Table" in the query works with Cassandra3.0. Older versions might
# ## need to use "type=ColumnFamily"
# metrics = [
# "/java.lang:type=Memory/HeapMemoryUsage",
# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
# ]

# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
# ## All configuration values are optional, defaults are shown below
#
# ## location of ceph binary
# ceph_binary = "/usr/bin/ceph"
#
# ## directory in which to look for socket files
# socket_dir = "/var/run/ceph"
#
# ## prefix of MON and OSD socket files, used to determine socket type
# mon_prefix = "ceph-mon"
# osd_prefix = "ceph-osd"
#
# ## suffix used to identify socket files
# socket_suffix = "asok"

# # Read specific statistics per cgroup
# [[inputs.cgroup]]
# ## Directories in which to look for files, globs are supported.
# # paths = [
# # "/cgroup/memory",
# # "/cgroup/memory/child1",
# # "/cgroup/memory/child2/*",
# # ]
# ## cgroup stat fields, as file names, globs are supported.
# ## these file names are appended to each path from above.
# # files = ["memory.*usage*", "memory.limit_in_bytes"]
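# ## (e.g. the first path above combined with the second file name yields
# ## /cgroup/memory/memory.limit_in_bytes)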

# # Get standard chrony metrics, requires chronyc executable.
# [[inputs.chrony]]
# ## If true, chronyc tries to perform a DNS lookup for the time server.
# # dns_lookup = false

# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
# ## Amazon Region
# region = 'us-east-1'
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = '1m'
#
# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
# delay = '1m'
#
# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
# ## gaps or overlap in pulled data
# interval = '1m'
#
# ## Configure the TTL for the internal cache of metrics.
# ## Defaults to 1 hr if not specified
# #cache_ttl = '10m'
#
# ## Metric Statistic Namespace (required)
# namespace = 'AWS/ELB'
#
# ## Metrics to Pull (optional)
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
# #[[inputs.cloudwatch.metrics]]
# # names = ['Latency', 'RequestCount']
# #
# # ## Dimension filters for Metric (optional)
# # [[inputs.cloudwatch.metrics.dimensions]]
# # name = 'LoadBalancerName'
# # value = 'p-example'

# # Collects conntrack stats from the configured directories and files.
# [[inputs.conntrack]]
# ## The following defaults would work with multiple versions of conntrack.
# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
# ## kernel versions, as are the directory locations.
#
# ## Superset of filenames to look for within the conntrack dirs.
# ## Missing files will be ignored.
# files = ["ip_conntrack_count","ip_conntrack_max",
# "nf_conntrack_count","nf_conntrack_max"]
#
# ## Directories to search within for the conntrack files above.
# ## Missing directories will be ignored.
# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
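# ## (e.g. with the defaults above, telegraf reads
# ## /proc/sys/net/netfilter/nf_conntrack_count when that file exists)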

# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
# ## Most of these values default to the ones configured on a Consul's agent level.
# ## Optional Consul server address (default: "localhost")
# # address = "localhost"
# ## Optional URI scheme for the Consul server (default: "http")
# # scheme = "http"
# ## Optional ACL token used in every request (default: "")
# # token = ""
# ## Optional username used for request HTTP Basic Authentication (default: "")
# # username = ""
# ## Optional password used for HTTP Basic Authentication (default: "")
# # password = ""
# ## Optional data centre to query the health checks from (default: "")
# # datacentre = ""

# # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]]
# ## specify servers via a url matching:
# ## [protocol://][:password]@address[:port]
# ## e.g.
# ## http://couchbase-0.example.com/
# ## http://admin:secret@couchbase-0.example.com:8091/
# ##
# ## If no servers are specified, then localhost is used as the host.
# ## If no protocol is specified, HTTP is used.
# ## If no port is specified, 8091 is used.
# servers = ["http://localhost:8091"]

# # Read CouchDB Stats from one or more servers
# [[inputs.couchdb]]
# ## Works with CouchDB stats endpoints out of the box
# ## Multiple HOSTs from which to read CouchDB stats:
# hosts = ["http://localhost:8086/_stats"]

# # Read metrics from one or many disque servers
# [[inputs.disque]]
# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port and password.
# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
# ## If no servers are specified, then localhost is used as the host.
# servers = ["localhost"]

# # Query the given DNS server(s) and gather statistics
# [[inputs.dns_query]]
# ## servers to query
# servers = ["8.8.8.8"] # required
#
# ## Domains or subdomains to query. "."(root) is default
# domains = ["."] # optional
#
# ## Query record type. Default is "A"
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A" # optional
#
# ## Dns server port. 53 is default
# port = 53 # optional
#
# ## Query timeout in seconds. Default is 2 seconds
# timeout = 2 # optional

# # Read metrics about docker containers
# [[inputs.docker]]
# ## Docker Endpoint
# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
# ## network (eth0, eth1, ...) stats or not
# perdevice = true
# ## Whether to report for each container total blkio and network stats or not
# total = false

# # Read statistics from one or many dovecot servers
# [[inputs.dovecot]]
# ## specify dovecot servers via an address:port list
# ## e.g.
# ## localhost:24242
# ##
# ## If no servers are specified, then localhost is used as the host.
# servers = ["localhost:24242"]
# ## Type is one of "user", "domain", "ip", or "global"
# type = "global"
# ## Wildcard matches like "*.com". An empty string "" is same as "*"
# ## If type = "ip" filters should be <IP/network>
# filters = [""]

# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
# servers = ["http://localhost:9200"]
#
# ## set local to false when you want to read the indices stats from all nodes
# ## within the cluster
# local = true
#
# ## set cluster_health to true when you want to also obtain cluster level stats
# cluster_health = false
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false

# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
# ## Commands array
# commands = [
# "/tmp/test.sh",
# "/usr/bin/mycollector --foo=bar",
# "/tmp/collect_*.sh"
# ]
#
# ## Timeout for each command to complete.
# timeout = "5s"
#
# ## measurement name suffix (for separating different commands)
# name_suffix = "_mycollector"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"

# # Read stats about given file(s)
# [[inputs.filestat]]
# ## Files to gather stats about.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## "/var/log/**.log" -> recursively find all .log files in /var/log
# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
# ## "/var/log/apache.log" -> just tail the apache log file
# ##
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/log/**.log"]
# ## If true, read the entire file and calculate an md5 checksum.
# md5 = false

# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
# ## API endpoint, currently supported API:
# ##
# ## - multiple (Ex http://<host>:12900/system/metrics/multiple)
# ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
# ##
# ## For namespace endpoint, the metrics array will be ignored for that call.
# ## Endpoint can contain namespace and multiple type calls.
# ##
# ## Please check http://[graylog-server-ip]:12900/api-browser for the full list
# ## of endpoints
# servers = [
# "http://[graylog-server-ip]:12900/system/metrics/multiple",
# ]
#
# ## Metrics list
# ## List of metrics can be found on Graylog webservice documentation.
# ## Or by hitting the web service api at:
# ## http://[graylog-host]:12900/system/metrics
# metrics = [
# "jvm.cl.loaded",
# "jvm.memory.pools.Metaspace.committed"
# ]
#
# ## Username and password
# username = ""
# password = ""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false

# # Read metrics of haproxy, via socket or csv stats page
# [[inputs.haproxy]]
# ## An array of address to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
# ## Make sure you specify the complete path to the stats endpoint
# ## ie 10.10.3.33:1936/haproxy?stats
# #
# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
# ## Or you can also use local socket
# ## servers = ["socket:/run/haproxy/admin.sock"]

# # Monitor disks' temperatures using hddtemp
# [[inputs.hddtemp]]
# ## By default, telegraf gathers temperature data from all disks detected by
# ## hddtemp.
# ##
# ## Only collect temps from the selected disks.
# ##
# ## A * as the device name will return the temperature values of all disks.
# ##
# # address = "127.0.0.1:7634"
# # devices = ["sda", "*"]

# # HTTP/HTTPS request given an address, a method and a timeout
# [[inputs.http_response]]
# ## Server address (default http://localhost)
# address = "http://github.com"
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
# ## HTTP Request Method
# method = "GET"
# ## Whether to follow redirects from the server (defaults to false)
# follow_redirects = true
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false

# # Read flattened metrics from one or more JSON HTTP endpoints
# [[inputs.httpjson]]
# ## NOTE: This plugin only reads numerical measurements; strings and booleans
# ## will be ignored.
#
# ## a name for the service being polled
# name = "webserver_stats"
#
# ## URL of each server in the service's cluster
# servers = [
# "http://localhost:9999/stats/",
# "http://localhost:9998/stats/",
# ]
#
# ## HTTP method to use: GET or POST (case-sensitive)
# method = "GET"
#
# ## List of tag names to extract from top-level of JSON server response
# # tag_keys = [
# # "my_tag_1",
# # "my_tag_2"
# # ]
#
# ## HTTP parameters (all values must be strings)
# [inputs.httpjson.parameters]
# event_type = "cpu_spike"
# threshold = "0.75"
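# ## (With method = "GET", these parameters are sent as the URL query string,
# ## ie a request like /stats/?event_type=cpu_spike&threshold=0.75; a hedged
# ## illustration of the plugin's documented behaviour.)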
#
# ## HTTP Header parameters (all values must be strings)
# # [inputs.httpjson.headers]
# # X-Auth-Token = "my-xauth-token"
# # apiVersion = "v1"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false

# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.influxdb]]
# ## Works with InfluxDB debug endpoints out of the box,
# ## but other services can use this format too.
# ## See the influxdb plugin's README for more details.
#
# ## Multiple URLs from which to read InfluxDB-formatted JSON
# ## Default is "http://localhost:8086/debug/vars".
# urls = [
# "http://localhost:8086/debug/vars"
# ]

# # Read metrics from one or many bare metal servers
# [[inputs.ipmi_sensor]]
# ## specify servers via a url matching:
# ## [username[:password]@][protocol[(address)]]
# ## e.g.
# ## root:passwd@lan(127.0.0.1)
# ##
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]

# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# ## This is the context root used to compose the jolokia url
# context = "/jolokia"
#
# ## This specifies the mode used
# # mode = "proxy"
# #
# ## When in proxy mode this section is used to specify further
# ## proxy address configurations.
# ## Remember to change host address to fit your environment.
# # [inputs.jolokia.proxy]
# # host = "127.0.0.1"
# # port = "8080"
#
#
# ## List of servers exposing jolokia read service
# [[inputs.jolokia.servers]]
# name = "as-server-01"
# host = "127.0.0.1"
# port = "8080"
# # username = "myuser"
# # password = "mypassword"
#
# ## List of metrics collected on above servers
# ## Each metric consists of a name, a jmx path and either
# ## a pass or drop slice attribute.
# ## This collects all heap memory usage metrics.
# [[inputs.jolokia.metrics]]
# name = "heap_memory_usage"
# mbean = "java.lang:type=Memory"
# attribute = "HeapMemoryUsage"
#
# ## This collects thread count metrics.
# [[inputs.jolokia.metrics]]
# name = "thread_count"
# mbean = "java.lang:type=Threading"
# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
#
# ## This collects loaded/unloaded class count metrics.
# [[inputs.jolokia.metrics]]
# name = "class_count"
# mbean = "java.lang:type=ClassLoading"
# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"

# # Get kernel statistics from /proc/vmstat
# [[inputs.kernel_vmstat]]
# # no configuration

# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
# ## An array of URI to gather stats about LeoFS.
# ## Specify an ip or hostname with port. ie 127.0.0.1:4020
# servers = ["127.0.0.1:4021"]

# # Read metrics from local Lustre service on OST, MDS
# [[inputs.lustre2]]
# ## An array of /proc globs to search for Lustre stats
# ## If not specified, the default will work on Lustre 2.5.x
# ##
# # ost_procfiles = [
# # "/proc/fs/lustre/obdfilter/*/stats",
# # "/proc/fs/lustre/osd-ldiskfs/*/stats",
# # "/proc/fs/lustre/obdfilter/*/job_stats",
# # ]
# # mds_procfiles = [
# # "/proc/fs/lustre/mdt/*/md_stats",
# # "/proc/fs/lustre/mdt/*/job_stats",
# # ]

# # Gathers metrics from the /3.0/reports MailChimp API
# [[inputs.mailchimp]]
# ## MailChimp API key
# ## get from https://admin.mailchimp.com/account/api/
# api_key = "" # required
# ## Reports for campaigns sent more than days_old ago will not be collected.
# ## 0 means collect all.
# days_old = 0
# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
# # campaign_id = ""

# # Read metrics from one or many memcached servers
# [[inputs.memcached]]
# ## An array of address to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.0.0.1:11211, etc.
# servers = ["localhost:11211"]
# # unix_sockets = ["/var/run/memcached.sock"]

# # Telegraf plugin for gathering metrics from N Mesos masters
# [[inputs.mesos]]
# ## Timeout, in ms.
# timeout = 100
# ## A list of Mesos masters.
# masters = ["localhost:5050"]
# ## Master metrics groups to be collected, by default, all enabled.
# master_collections = [
# "resources",
# "master",
# "system",
# "agents",
# "frameworks",
# "tasks",
# "messages",
# "evqueue",
# "registrar",
# ]
# ## A list of Mesos slaves, default is []
# # slaves = []
# ## Slave metrics groups to be collected, by default, all enabled.
# # slave_collections = [
# # "resources",
# # "agent",
# # "system",
# # "executors",
# # "tasks",
# # "messages",
# # ]
# ## Include mesos tasks statistics, default is false
# # slave_tasks = true

# # Read metrics from one or many MongoDB servers
# [[inputs.mongodb]]
# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie,
# ## mongodb://user:auth_key@10.10.3.30:27017,
# ## mongodb://10.10.3.33:18832,
# ## 10.0.0.1:10000, etc.
# servers = ["127.0.0.1:27017"]
# gather_perdb_stats = false

# # Read metrics from one or many mysql servers
# [[inputs.mysql]]
# ## specify servers via a url matching:
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
# ## e.g.
# ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
# ## db_user@tcp(127.0.0.1:3306)/?tls=false
# #
# ## If no servers are specified, then localhost is used as the host.
# servers = ["tcp(127.0.0.1:3306)/"]
# ## the limits for metrics from perf_events_statements
# perf_events_statements_digest_text_limit = 120
# perf_events_statements_limit = 250
# perf_events_statements_time_limit = 86400
# #
# ## if the list is empty, then metrics are gathered from all database tables
# table_schema_databases = []
# #
# ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases provided in the list above
# gather_table_schema = false
# #
# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
# gather_process_list = true
# #
# ## gather auto_increment columns and max values from information schema
# gather_info_schema_auto_inc = true
# #
# ## gather metrics from SHOW SLAVE STATUS command output
# gather_slave_status = true
# #
# ## gather metrics from SHOW BINARY LOGS command output
# gather_binary_logs = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
# gather_table_io_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
# gather_table_lock_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
# gather_index_io_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
# gather_event_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
# gather_file_events_stats = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
# gather_perf_events_statements = false
# #
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
# interval_slow = "30m"

# # Read metrics about network interface usage
# [[inputs.net]]
# ## By default, telegraf gathers stats from any up interface (excluding loopback)
# ## Setting interfaces will tell it to gather these explicit interfaces,
# ## regardless of status.
# ##
# # interfaces = ["eth0"]

# # TCP or UDP 'ping' given url and collect response time in seconds
# [[inputs.net_response]]
# ## Protocol, must be "tcp" or "udp"
# protocol = "tcp"
# ## Server address (default localhost)
# address = "github.com:80"
# ## Set timeout
# timeout = "1s"
#
# ## Optional string sent to the server
# # send = "ssh"
# ## Optional expected string in answer
# # expect = "ssh"
# ## Set read timeout (only used if expecting a response)
# read_timeout = "1s"
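# ## Example (a hedged sketch; the host is hypothetical): checking that an
# ## SMTP server answers with its banner
# # protocol = "tcp"
# # address = "mail.example.com:25"
# # expect = "220"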

# # Read TCP metrics such as established, time wait and sockets counts.
# [[inputs.netstat]]
# # no configuration

# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
# ## An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/status"]

# # Read NSQ topic and channel statistics.
# [[inputs.nsq]]
# ## An array of NSQD HTTP API endpoints
# endpoints = ["http://localhost:4151"]

# # Collect kernel snmp counters and network interface statistics
# [[inputs.nstat]]
# ## file paths for proc files. If empty default paths will be used:
# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
# ## These can also be overridden with env variables, see README.
# proc_net_netstat = "/proc/net/netstat"
# proc_net_snmp = "/proc/net/snmp"
# proc_net_snmp6 = "/proc/net/snmp6"
# ## dump metrics with 0 values too
# dump_zeros = true

# # Get standard NTP query metrics, requires ntpq executable.
# [[inputs.ntpq]]
# ## If false, set the -n ntpq flag. Can reduce metric gather time.
# dns_lookup = true

# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
# ## Path of passenger-status.
# ##
# ## The plugin gathers metrics by parsing the XML output of passenger-status.
# ## More information about the tool:
# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
# ##
# ## If no path is specified, then the plugin simply executes passenger-status,
# ## which hopefully can be found in your PATH
# command = "passenger-status -v --show=xml"

# # Read metrics of phpfpm, via HTTP status page or socket
# [[inputs.phpfpm]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with optional port and path
# ##
# ## Plugin can be configured in three modes (any of them can be used):
# ## - http: the URL must start with http:// or https://, ie:
# ## "http://localhost/status"
# ## "http://192.168.130.1/status?full"
# ##
# ## - unixsocket: path to fpm socket, ie:
# ## "/var/run/php5-fpm.sock"
# ## or using a custom fpm status path:
# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
# ##
# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
# ## "fcgi://10.0.0.12:9000/status"
# ## "cgi://10.0.10.12:9001/status"
# ##
# ## Example of gathering from both a local socket and a remote host
# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
# urls = ["http://localhost/status"]

# # Ping given url(s) and return statistics
# [[inputs.ping]]
# ## NOTE: this plugin forks the ping command. You may need to set capabilities
# ## via setcap cap_net_raw+p /bin/ping
# #
# ## urls to ping
# urls = ["www.google.com"] # required
# ## number of pings to send per collection (ping -c <COUNT>)
# count = 1 # required
# ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
# ping_interval = 0.0
# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
# timeout = 1.0
# ## interface to send ping from (ping -I <INTERFACE>)
# interface = ""

# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# ##
# ## All connection parameters are optional.
# ##
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# ##
# address = "host=localhost user=postgres sslmode=disable"
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# # databases = ["app_production", "testing"]
  1097. # # Read metrics from one or many postgresql servers
  1098. # [[inputs.postgresql_extensible]]
  1099. # ## specify address via a url matching:
  1100. # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
  1101. # ## ?sslmode=[disable|verify-ca|verify-full]
  1102. # ## or a simple string:
  1103. # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
  1104. # #
  1105. # ## All connection parameters are optional. #
  1106. # ## Without the dbname parameter, the driver will default to a database
  1107. # ## with the same name as the user. This dbname is just for instantiating a
  1108. # ## connection with the server and doesn't restrict the databases we are trying
  1109. # ## to grab metrics for.
  1110. # #
  1111. # address = "host=localhost user=postgres sslmode=disable"
  1112. # ## A list of databases to pull metrics about. If not specified, metrics for all
  1113. # ## databases are gathered.
  1114. # ## databases = ["app_production", "testing"]
  1115. # #
  1116. # # outputaddress = "db01"
  1117. # ## A custom name for the database that will be used as the "server" tag in the
  1118. # ## measurement output. If not specified, a default one generated from
  1119. # ## the connection address is used.
  1120. # #
  1121. # ## Define the toml config where the sql queries are stored
  1122. # ## New queries can be added, if the withdbname is set to true and there is no
  1123. # ## databases defined in the 'databases field', the sql query is ended by a
  1124. # ## 'is not null' in order to make the query succeed.
  1125. # ## Example :
  1126. # ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become
  1127. # ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
  1128. # ## because the databases variable was set to ['postgres', 'pgbench' ] and the
  1129. # ## withdbname was true. Be careful that if the withdbname is set to false you
  1130. # ## don't have to define the where clause (aka with the dbname) the tagvalue
  1131. # ## field is used to define custom tags (separated by commas)
  1132. # ## The optional "measurement" value can be used to override the default
  1133. # ## output measurement name ("postgresql").
  1134. # #
  1135. # ## Structure :
  1136. # ## [[inputs.postgresql_extensible.query]]
  1137. # ## sqlquery string
  1138. # ## version string
  1139. # ## withdbname boolean
  1140. # ## tagvalue string (comma separated)
  1141. # ## measurement string
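# ## For example, a query relying on withdbname (hypothetical values) might
# ## look like:
# # [[inputs.postgresql_extensible.query]]
# #   sqlquery="SELECT * FROM pg_stat_database where datname"
# #   version=901
# #   withdbname=true
# #   tagvalue="datname"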
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_database"
# version=901
# withdbname=false
# tagvalue=""
# measurement=""
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_bgwriter"
# version=901
# withdbname=false
# tagvalue="postgresql.stats"
# # Read metrics from one or many PowerDNS servers
# [[inputs.powerdns]]
# ## An array of sockets to gather stats about.
# ## Specify a path to unix socket.
# unix_sockets = ["/var/run/pdns.controlsocket"]
# # Monitor process cpu and memory usage
# [[inputs.procstat]]
# ## Must specify one of: pid_file, exe, or pattern
# ## PID file to monitor process
# pid_file = "/var/run/nginx.pid"
# ## executable name (ie, pgrep <exe>)
# # exe = "nginx"
# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
# # pattern = "nginx"
# ## user as argument for pgrep (ie, pgrep -u <user>)
# # user = "nginx"
#
# ## override for process_name
# ## This is optional; default is sourced from /proc/<pid>/status
# # process_name = "bar"
# ## Field name prefix
# prefix = ""
# ## comment this out if you want raw cpu_time stats
# fielddrop = ["cpu_time_*"]
# # Read metrics from one or many prometheus clients
# [[inputs.prometheus]]
# ## An array of urls to scrape metrics from.
# urls = ["http://localhost:9100/metrics"]
#
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
# ## Optional SSL Config
# # ssl_ca = /path/to/cafile
# # ssl_cert = /path/to/certfile
# # ssl_key = /path/to/keyfile
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Reads last_run_summary.yaml file and converts to measurements
# [[inputs.puppetagent]]
# ## Location of puppet last run summary file
# location = "/var/lib/puppet/state/last_run_summary.yaml"
# # Read metrics from one or many RabbitMQ servers via the management API
# [[inputs.rabbitmq]]
# # url = "http://localhost:15672"
# # name = "rmq-server-1" # optional tag
# # username = "guest"
# # password = "guest"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## A list of nodes to pull metrics about. If not specified, metrics for
# ## all nodes are gathered.
# # nodes = ["rabbit@node1", "rabbit@node2"]
# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
# [[inputs.raindrops]]
# ## An array of raindrops middleware URIs to gather stats from.
# urls = ["http://localhost:8080/_raindrops"]
# # Read metrics from one or many redis servers
# [[inputs.redis]]
# ## specify servers via a url matching:
# ## [protocol://][:password]@address[:port]
# ## e.g.
# ## tcp://localhost:6379
# ## tcp://:password@192.168.99.100
# ## unix:///var/run/redis.sock
# ##
# ## If no servers are specified, then localhost is used as the host.
# ## If no port is specified, 6379 is used.
# servers = ["tcp://localhost:6379"]
# # Read metrics from one or many RethinkDB servers
# [[inputs.rethinkdb]]
# ## An array of URIs to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie,
# ## rethinkdb://user:auth_key@10.10.3.30:28105,
# ## rethinkdb://10.10.3.33:18832,
# ## 10.0.0.1:10000, etc.
# servers = ["127.0.0.1:28015"]
# # Read metrics from one or many Riak servers
# [[inputs.riak]]
# # Specify a list of one or more riak http servers
# servers = ["http://localhost:8098"]
# # Monitor sensors, requires lm-sensors package
# [[inputs.sensors]]
# ## Remove numbers from field names.
# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
# # remove_numbers = true
# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
# agents = [ "127.0.0.1:161" ]
# timeout = "5s"
# version = 2
#
# # SNMPv1 & SNMPv2 parameters
# community = "public"
#
# # SNMPv2 & SNMPv3 parameters
# max_repetitions = 50
#
# # SNMPv3 parameters
# #sec_name = "myuser"
# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
# #auth_password = "password123"
# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
# #context_name = ""
# #priv_protocol = "" # Values: "DES", "AES", ""
# #priv_password = ""
#
# # measurement name
# name = "system"
# [[inputs.snmp.field]]
# name = "hostname"
# oid = ".1.0.0.1.1"
# [[inputs.snmp.field]]
# name = "uptime"
# oid = ".1.0.0.1.2"
# [[inputs.snmp.field]]
# name = "load"
# oid = ".1.0.0.1.3"
# [[inputs.snmp.field]]
# oid = "HOST-RESOURCES-MIB::hrMemorySize"
#
# [[inputs.snmp.table]]
# # measurement name
# name = "remote_servers"
# inherit_tags = [ "hostname" ]
# [[inputs.snmp.table.field]]
# name = "server"
# oid = ".1.0.0.0.1.0"
# is_tag = true
# [[inputs.snmp.table.field]]
# name = "connections"
# oid = ".1.0.0.0.1.1"
# [[inputs.snmp.table.field]]
# name = "latency"
# oid = ".1.0.0.0.1.2"
#
# [[inputs.snmp.table]]
# # auto populate table's fields using the MIB
# oid = "HOST-RESOURCES-MIB::hrNetworkTable"
# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
# [[inputs.snmp_legacy]]
# ## Use 'oids.txt' file to translate oids to names
# ## To generate 'oids.txt' you need to run:
# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
# ## Or if you have another MIB folder with custom MIBs
# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
# snmptranslate_file = "/tmp/oids.txt"
# [[inputs.snmp.host]]
# address = "192.168.2.2:161"
# # SNMP community
# community = "public" # default public
# # SNMP version (1, 2 or 3)
# # Version 3 not supported yet
# version = 2 # default 2
# # SNMP response timeout
# timeout = 2.0 # default 2.0
# # SNMP request retries
# retries = 2 # default 2
# # Which get/bulk do you want to collect for this host
# collect = ["mybulk", "sysservices", "sysdescr"]
# # Simple list of OIDs to get, in addition to "collect"
# get_oids = []
#
# [[inputs.snmp.host]]
# address = "192.168.2.3:161"
# community = "public"
# version = 2
# timeout = 2.0
# retries = 2
# collect = ["mybulk"]
# get_oids = [
#   "ifNumber",
#   ".1.3.6.1.2.1.1.3.0",
# ]
#
# [[inputs.snmp.get]]
# name = "ifnumber"
# oid = "ifNumber"
#
# [[inputs.snmp.get]]
# name = "interface_speed"
# oid = "ifSpeed"
# instance = "0"
#
# [[inputs.snmp.get]]
# name = "sysuptime"
# oid = ".1.3.6.1.2.1.1.3.0"
# unit = "second"
#
# [[inputs.snmp.bulk]]
# name = "mybulk"
# max_repetition = 127
# oid = ".1.3.6.1.2.1.1"
#
# [[inputs.snmp.bulk]]
# name = "ifoutoctets"
# max_repetition = 127
# oid = "ifOutOctets"
#
# [[inputs.snmp.host]]
# address = "192.168.2.13:161"
# #address = "127.0.0.1:161"
# community = "public"
# version = 2
# timeout = 2.0
# retries = 2
# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
# collect = ["sysuptime"]
# [[inputs.snmp.host.table]]
# name = "iftable3"
# include_instances = ["enp5s0", "eth1"]
#
# # SNMP TABLEs
# # table with neither mapping nor subtables
# [[inputs.snmp.table]]
# name = "iftable1"
# oid = ".1.3.6.1.2.1.31.1.1.1"
#
# # table without mapping but with subtables
# [[inputs.snmp.table]]
# name = "iftable2"
# oid = ".1.3.6.1.2.1.31.1.1.1"
# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
#
# # table with mapping but without subtables
# [[inputs.snmp.table]]
# name = "iftable3"
# oid = ".1.3.6.1.2.1.31.1.1.1"
# # if empty, get all instances
# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# # if empty, get all subtables
#
# # table with both mapping and subtables
# [[inputs.snmp.table]]
# name = "iftable4"
# oid = ".1.3.6.1.2.1.31.1.1.1"
# # if empty, get all instances
# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# # if empty, get all subtables
# # sub_tables need not be "real" subtables
# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
# # Read metrics from Microsoft SQL Server
# [[inputs.sqlserver]]
# ## Specify instances to monitor with a list of connection strings.
# ## All connection parameters are optional.
# ## By default, the host is localhost, listening on default port, TCP 1433.
# ## For Windows, the user is the currently running AD user (SSO).
# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
# ## parameters.
# # servers = [
# #   "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
# # ]
# # Sysstat metrics collector
# [[inputs.sysstat]]
# ## Path to the sadc command.
# #
# ## Common Defaults:
# ##   Debian/Ubuntu: /usr/lib/sysstat/sadc
# ##   Arch:          /usr/lib/sa/sadc
# ##   RHEL/CentOS:   /usr/lib64/sa/sadc
# sadc_path = "/usr/lib/sa/sadc" # required
# #
# #
# ## Path to the sadf command, if it is not in PATH
# # sadf_path = "/usr/bin/sadf"
# #
# #
# ## Activities is a list of activities that are passed as arguments to the
# ## sadc collector utility (e.g. DISK, SNMP etc...)
# ## The more activities that are added, the more data is collected.
# # activities = ["DISK"]
# #
# #
# ## Group metrics to measurements.
# ##
# ## If group is false, each metric is prefixed with a description and is
# ## itself a measurement.
# ##
# ## If group is true, corresponding metrics are grouped into a single measurement.
# # group = true
# #
# #
# ## Options for the sadf command. The values on the left represent the sadf
# ## options and the values on the right their description (which are used for
# ## grouping and prefixing metrics).
# ##
# ## Run 'sar -h' or 'man sar' to find out the supported options for your
# ## sysstat version.
# [inputs.sysstat.options]
# -C = "cpu"
# -B = "paging"
# -b = "io"
# -d = "disk" # requires DISK activity
# "-n ALL" = "network"
# "-P ALL" = "per_cpu"
# -q = "queue"
# -R = "mem"
# -r = "mem_util"
# -S = "swap_util"
# -u = "cpu_util"
# -v = "inode"
# -W = "swap"
# -w = "task"
# # -H = "hugepages" # only available for newer linux distributions
# # "-I ALL" = "interrupts" # requires INT activity
# #
# #
# ## Device tags can be used to add additional tags for devices.
# ## For example, the configuration below adds a tag vg with value rootvg for
# ## all metrics with sda devices.
# # [[inputs.sysstat.device_tags.sda]]
# #   vg = "rootvg"
# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
# ## Set the amplitude
# amplitude = 10.0
# # Read Twemproxy stats data
# [[inputs.twemproxy]]
# ## Twemproxy stats address and port (no scheme)
# addr = "localhost:22222"
# ## Monitor pool name
# pools = ["redis_pool", "mc_pool"]
# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
# ## The default location of the varnishstat binary can be overridden with:
# binary = "/usr/bin/varnishstat"
#
# ## By default, telegraf gathers stats for 3 metric points.
# ## Setting stats will override the defaults shown below.
# ## Glob matching can be used, ie, stats = ["MAIN.*"]
# ## stats may also be set to ["*"], which will collect all stats
# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
# [[inputs.zfs]]
# ## ZFS kstat path. Ignored on FreeBSD
# ## If not specified, then default is:
# # kstatPath = "/proc/spl/kstat/zfs"
#
# ## By default, telegraf gathers all zfs stats
# ## If not specified, then default is:
# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
#
# ## By default, don't gather zpool stats
# # poolMetrics = false
# # Reads 'mntr' stats from one or many zookeeper servers
# [[inputs.zookeeper]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
#
# ## If no servers are specified, then localhost is used as the host.
# ## If no port is specified, 2181 is used.
# servers = [":2181"]
###############################################################################
#                          SERVICE INPUT PLUGINS                              #
###############################################################################
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## topic(s) to consume
# topics = ["telegraf"]
# ## an array of Zookeeper connection strings
# zookeeper_peers = ["localhost:2181"]
# ## Zookeeper Chroot
# zookeeper_chroot = ""
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options; read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
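# ## An "influx"-formatted message is a line of InfluxDB line protocol, e.g.
# ## (hypothetical point): cpu,host=server01 usage_idle=98.2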
# # Stream and parse log file(s).
# [[inputs.logparser]]
# ## Log files to parse.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ##   /var/log/**.log     -> recursively find all .log files in /var/log
# ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
# ##   /var/log/apache.log -> only tail the apache log file
# files = ["/var/log/apache/access.log"]
# ## Read file from beginning.
# from_beginning = false
#
# ## Parse logstash-style "grok" patterns:
# ##   Telegraf built-in parsing patterns: https://goo.gl/dkay10
# [inputs.logparser.grok]
# ## This is a list of patterns to check the given log file(s) for.
# ## Note that adding patterns here increases processing time. The most
# ## efficient configuration is to have one pattern per logparser.
# ## Other common built-in patterns are:
# ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
# ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
# patterns = ["%{COMBINED_LOG_FORMAT}"]
# ## Name of the output measurement.
# measurement = "apache_access_log"
# ## Full path(s) to custom pattern files.
# custom_pattern_files = []
# ## Custom patterns can also be defined here. Put one pattern per line.
# custom_patterns = '''
# '''
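# ## e.g. one hypothetical pattern per line, in grok "NAME regex" syntax:
# ##   POSTFIX_QUEUEID [0-9A-F]{10,11}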
# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
# servers = ["localhost:1883"]
# ## MQTT QoS, must be 0, 1, or 2
# qos = 0
#
# ## Topics to subscribe to
# topics = [
#   "telegraf/host01/cpu",
#   "telegraf/+/mem",
#   "sensors/#",
# ]
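# ## ("+" is the standard MQTT single-level wildcard, so "telegraf/+/mem"
# ## matches e.g. "telegraf/host02/mem"; "#" matches any number of levels,
# ## so "sensors/#" matches everything published under "sensors/".)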
#
# # if true, messages that can't be delivered while the subscriber is offline
# # will be delivered when it comes back (such as on service restart).
# # NOTE: if true, client_id MUST be set
# persistent_session = false
# # If empty, a random client ID will be generated.
# client_id = ""
#
# ## username and password to connect to the MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options; read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
# ## urls of NATS servers
# servers = ["nats://localhost:4222"]
# ## Use Transport Layer Security
# secure = false
# ## subject(s) to consume
# subjects = ["telegraf"]
# ## name a queue group
# queue_group = "telegraf_consumers"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options; read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
# ## A string representing the NSQD TCP endpoint
# server = "localhost:4150"
# topic = "telegraf"
# channel = "consumer"
# max_in_flight = 100
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options; read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
# # Statsd Server
[[inputs.statsd]]
# ## Address and port to host UDP listener on
service_address = ":8125"
# ## Delete gauges every interval (default=false)
# delete_gauges = false
# ## Delete counters every interval (default=false)
# delete_counters = false
# ## Delete sets every interval (default=false)
# delete_sets = false
# ## Delete timings & histograms every interval (default=true)
# delete_timings = true
# ## Percentiles to calculate for timing & histogram stats
# percentiles = [90]
#
# ## separator to use between elements of a statsd metric
# metric_separator = "_"
#
# ## Parses tags in the datadog statsd format
# ## http://docs.datadoghq.com/guides/dogstatsd/
# parse_data_dog_tags = false
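# ## A dogstatsd-style line with a tag might look like (hypothetical metric):
# ##   users.online:1|c|#country:china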
#
# ## Statsd data translation templates, more info can be read here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
templates = [
  "cpu.* measurement*"
]
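# ## (With the template above, a hypothetical statsd metric named
# ## "cpu.load.idle" should map to the measurement "cpu_load_idle",
# ## its elements joined with metric_separator.)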
#
# ## Number of UDP messages allowed to queue up; once filled,
# ## the statsd server will start dropping packets
allowed_pending_messages = 100
#
# ## Number of timing/histogram values to track per-measurement in the
# ## calculation of percentiles. Raising this limit increases the accuracy
# ## of percentiles but also increases the memory usage and cpu time.
# percentile_limit = 1000
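# ## For a quick smoke test of this listener, something like the following
# ## (assuming a netcat with UDP support) should register a counter:
# ##   echo "deploys.test:1|c" | nc -u -w1 localhost 8125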
# # Stream a log file, like the tail -f command
# [[inputs.tail]]
# ## files to tail.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ##   "/var/log/**.log"     -> recursively find all .log files in /var/log
# ##   "/var/log/*/*.log"    -> find all .log files with a parent dir in /var/log
# ##   "/var/log/apache.log" -> just tail the apache log file
# ##
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/mymetrics.out"]
# ## Read file from beginning.
# from_beginning = false
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options; read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
# # Generic TCP listener
# [[inputs.tcp_listener]]
# ## Address and port to host TCP listener on
# service_address = ":8094"
#
# ## Number of TCP messages allowed to queue up. Once filled, the
# ## TCP listener will start dropping packets.
# allowed_pending_messages = 10000
#
# ## Maximum number of concurrent TCP connections to allow
# max_tcp_connections = 250
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options; read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
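# ## For a quick test of the TCP listener, one could pipe a line of influx
# ## line protocol into it (hypothetical metric; nc flags vary by netcat
# ## implementation):
# ##   echo "example_metric value=42" | nc -w1 localhost 8094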
# # Generic UDP listener
# [[inputs.udp_listener]]
# ## Address and port to host UDP listener on
# service_address = ":8092"
#
# ## Number of UDP messages allowed to queue up. Once filled, the
# ## UDP listener will start dropping packets.
# allowed_pending_messages = 10000
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options; read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
# # A Webhooks Event collector
# [[inputs.webhooks]]
# ## Address and port to host Webhook listener on
# service_address = ":1619"
#
# [inputs.webhooks.github]
# path = "/github"
#
# [inputs.webhooks.mandrill]
# path = "/mandrill"
#
# [inputs.webhooks.rollbar]
# path = "/rollbar"
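# ## With the paths above, each service's webhook would be pointed at this
# ## listener, e.g. GitHub events POSTed to http://<telegraf-host>:1619/github
# ## (hypothetical host name).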