# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
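#
# For example (hypothetical variable names, assuming they are set in the
# environment Telegraf runs in):
#   interval = "${AGENT_INTERVAL}"      # string value, must be quoted
#   metric_batch_size = ${BATCH_SIZE}   # number or boolean, left unquoted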


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 500

  ## Maximum number of unwritten metrics per output.
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. Maximum flush_interval will be
  ## flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ##   ie, when interval = "10s", precision will be "1s"
  ##       when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Log at debug level.
  # debug = false
  ## Log only error level messages.
  # quiet = false

  ## Log file name, the empty string means to log to stderr.
  # logfile = ""

  ## The logfile will be rotated after the time interval specified.  When set
  ## to 0 no time based rotation is performed.  Logs are rotated only when
  ## written to; if there is no log activity, rotation may be delayed.
  # logfile_rotation_interval = "0d"

  ## The logfile will be rotated when it becomes larger than the specified
  ## size.  When set to 0 no size based rotation is performed.
  # logfile_rotation_max_size = "0MB"

  ## Maximum number of rotated archives to keep, any older logs are deleted.
  ## If set to -1, no archives are removed.
  # logfile_rotation_max_archives = 5
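
  ## For example, an illustrative rotation policy (values chosen arbitrarily):
  ## rotate daily or once the file exceeds 10MB, keeping the 5 newest archives.
  # logfile_rotation_interval = "1d"
  # logfile_rotation_max_size = "10MB"
  # logfile_rotation_max_archives = 5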

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################


# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
  ## The full HTTP or UDP URL for your InfluxDB instance.
  ##
  ## Multiple URLs can be specified for a single cluster, only ONE of the
  ## urls will be written to each interval.
  # urls = ["unix:///var/run/influxdb.sock"]
  # urls = ["udp://127.0.0.1:8089"]
  # urls = ["http://127.0.0.1:8086"]

  ## The target database for metrics; will be created as needed.
  ## For a UDP endpoint, the database must be configured on the server side.
  # database = "telegraf"

  ## The value of this tag will be used to determine the database.  If this
  ## tag is not set the 'database' option is used as the default.
  # database_tag = ""
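  ##   ex (hypothetical tag name): database_tag = "dbname"
  ##       a metric tagged dbname=app1 would then be written to database "app1"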

  ## If true, the database tag will not be added to the metric.
  # exclude_database_tag = false

  ## If true, no CREATE DATABASE queries will be sent.  Set to true when using
  ## Telegraf with a user without permissions to create databases or when the
  ## database already exists.
  # skip_database_creation = false

  ## Name of existing retention policy to write to.  Empty string writes to
  ## the default retention policy.  Only takes effect when using HTTP.
  # retention_policy = ""

  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
  ## Only takes effect when using HTTP.
  # write_consistency = "any"

  ## Timeout for HTTP messages.
  # timeout = "5s"

  ## HTTP Basic Auth
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"

  ## HTTP User-Agent
  # user_agent = "telegraf"

  ## UDP payload size is the maximum packet size to send.
  # udp_payload = "512B"

  ## Optional TLS Config for use on HTTP connections.
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## HTTP Proxy override. If unset, the standard proxy environment variables
  ## are consulted to determine which proxy, if any, should be used.
  # http_proxy = "http://corporate.proxy:3128"

  ## Additional HTTP headers
  # http_headers = {"X-Special-Header" = "Special-Value"}

  ## HTTP Content-Encoding for write request body, can be set to "gzip" to
  ## compress body or "identity" to apply no encoding.
  # content_encoding = "identity"

  ## When true, Telegraf will output unsigned integers as unsigned values,
  ## i.e.: "42u".  You will need a version of InfluxDB supporting unsigned
  ## integer values.  Enabling this option will result in field type errors if
  ## existing data has been written.
  # influx_uint_support = false


# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
#   ## Amon Server Key
#   server_key = "my-server-key" # required.
#
#   ## Amon Instance URL
#   amon_instance = "https://youramoninstance" # required
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Publishes metrics to an AMQP broker
# [[outputs.amqp]]
#   ## Broker to publish to.
#   ##   deprecated in 1.7; use the brokers option
#   # url = "amqp://localhost:5672/influxdb"
#
#   ## Brokers to publish to.  If multiple brokers are specified a random broker
#   ## will be selected anytime a connection is established.  This can be
#   ## helpful for load balancing when not using a dedicated load balancer.
#   brokers = ["amqp://localhost:5672/influxdb"]
#
#   ## Maximum messages to send over a connection.  Once this is reached, the
#   ## connection is closed and a new connection is made.  This can be helpful for
#   ## load balancing when not using a dedicated load balancer.
#   # max_messages = 0
#
#   ## Exchange to declare and publish to.
#   exchange = "telegraf"
#
#   ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
#   # exchange_type = "topic"
#
#   ## If true, exchange will be passively declared.
#   # exchange_passive = false
#
#   ## Exchange durability can be either "transient" or "durable".
#   # exchange_durability = "durable"
#
#   ## Additional exchange arguments.
#   # exchange_arguments = { }
#   # exchange_arguments = {"hash_propery" = "timestamp"}
#
#   ## Authentication credentials for the PLAIN auth_method.
#   # username = ""
#   # password = ""
#
#   ## Auth method. PLAIN and EXTERNAL are supported
#   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
#   ## described here: https://www.rabbitmq.com/plugins.html
#   # auth_method = "PLAIN"
#
#   ## Metric tag to use as a routing key.
#   ##   ie, if this tag exists, its value will be used as the routing key
#   # routing_tag = "host"
#
#   ## Static routing key.  Used when no routing_tag is set or as a fallback
#   ## when the tag specified in routing tag is not found.
#   # routing_key = ""
#   # routing_key = "telegraf"
#
#   ## Delivery Mode controls if a published message is persistent.
#   ##   One of "transient" or "persistent".
#   # delivery_mode = "transient"
#
#   ## InfluxDB database added as a message header.
#   ##   deprecated in 1.7; use the headers option
#   # database = "telegraf"
#
#   ## InfluxDB retention policy added as a message header
#   ##   deprecated in 1.7; use the headers option
#   # retention_policy = "default"
#
#   ## Static headers added to each published message.
#   # headers = { }
#   # headers = {"database" = "telegraf", "retention_policy" = "default"}
#
#   ## Connection timeout.  If not provided, will default to 5s.  0s means no
#   ## timeout (not recommended).
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## If true use batch serialization format instead of line based delimiting.
#   ## Only applies to data formats which are not line based such as JSON.
#   ## Recommended to set to true.
#   # use_batch_format = false
#
#   ## Content encoding for message payloads, can be set to "gzip" to compress
#   ## the payload, or "identity" to apply no encoding.
#   ##
#   ## Please note that when use_batch_format = false each amqp message contains only
#   ## a single metric; it is recommended to use compression with the batch format
#   ## for best results.
#   # content_encoding = "identity"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"


# # Send metrics to Azure Application Insights
# [[outputs.application_insights]]
#   ## Instrumentation key of the Application Insights resource.
#   instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
#
#   ## Timeout for closing (default: 5s).
#   # timeout = "5s"
#
#   ## Enable additional diagnostic logging.
#   # enable_diagnostic_logging = false
#
#   ## Context Tag Sources add Application Insights context tags to a tag value.
#   ##
#   ## For list of allowed context tag keys see:
#   ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
#   # [outputs.application_insights.context_tag_sources]
#   #   "ai.cloud.role" = "kubernetes_container_name"
#   #   "ai.cloud.roleInstance" = "kubernetes_pod_name"


# # Send aggregate metrics to Azure Monitor
# [[outputs.azure_monitor]]
#   ## Timeout for HTTP writes.
#   # timeout = "20s"
#
#   ## Set the namespace prefix, defaults to "Telegraf/<input-name>".
#   # namespace_prefix = "Telegraf/"
#
#   ## Azure Monitor doesn't have a string value type, so convert string
#   ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
#   ## a maximum of 10 dimensions so Telegraf will only send the first 10
#   ## alphanumeric dimensions.
#   # strings_as_dimensions = false
#
#   ## Both region and resource_id must be set or be available via the
#   ## Instance Metadata service on Azure Virtual Machines.
#   #
#   ## Azure Region to publish metrics against.
#   ##   ex: region = "southcentralus"
#   # region = ""
#   #
#   ## The Azure Resource ID against which metrics will be logged.
#   ##   ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
#   # resource_id = ""
#
#   ## Optionally, if in Azure US Government, China or other sovereign
#   ## cloud environment, set appropriate REST endpoint for receiving
#   ## metrics. (Note: region may be unused in this context)
#   # endpoint_url = "https://monitoring.core.usgovcloudapi.net"


# # Publish Telegraf metrics to a Google Cloud PubSub topic
# [[outputs.cloud_pubsub]]
#   ## Required. Name of Google Cloud Platform (GCP) Project that owns
#   ## the given PubSub topic.
#   project = "my-project"
#
#   ## Required. Name of PubSub topic to publish metrics to.
#   topic = "my-topic"
#
#   ## Required. Data format to consume.
#   ## Each data format has its own unique set of configuration options.
#   ## Read more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
#
#   ## Optional. Filepath for GCP credentials JSON file to authorize calls to
#   ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
#   ## Application Default Credentials, which is preferred.
#   # credentials_file = "path/to/my/creds.json"
#
#   ## Optional. If true, will send all metrics per write in one PubSub message.
#   # send_batched = true
#
#   ## The following publish_* parameters specifically configure batching of
#   ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
#   ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
#
#   ## Optional. Send a request to PubSub (i.e. actually publish a batch)
#   ## when it has this many PubSub messages. If send_batched is true,
#   ## this is ignored and treated as if it were 1.
#   # publish_count_threshold = 1000
#
#   ## Optional. Send a request to PubSub (i.e. actually publish a batch)
#   ## when its size in bytes reaches this value. If send_batched is true,
#   ## this is ignored and treated as if it were 1.
#   # publish_byte_threshold = 1000000
#
#   ## Optional. Specifically configures requests made to the PubSub API.
#   # publish_num_go_routines = 2
#
#   ## Optional. Specifies a timeout for requests to the PubSub API.
#   # publish_timeout = "30s"
#
#   ## Optional. If true, published PubSub message data will be base64-encoded.
#   # base64_data = false
#
#   ## Optional. PubSub attributes to add to metrics.
#   # [outputs.cloud_pubsub.attributes]
#   #   my_attr = "tag_value"


# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
#   ## Amazon REGION
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Endpoint to make request against, the correct endpoint is automatically
#   ## determined and this option should only be set if you wish to override the
#   ## default.
#   ##   ex: endpoint_url = "http://localhost:8000"
#   # endpoint_url = ""
#
#   ## Namespace for the CloudWatch MetricDatums
#   namespace = "InfluxData/Telegraf"
#
#   ## If you have a large number of metrics, consider sending statistic values
#   ## instead of raw metrics, which can improve performance and also reduce AWS
#   ## API cost. If this flag is enabled, the plugin parses the required CloudWatch
#   ## statistic fields (count, min, max, and sum) and sends them to CloudWatch.
#   ## You can use the basicstats aggregator to calculate those fields. If not all
#   ## statistic fields are available, all fields are still sent as raw metrics.
#   # write_statistics = false


# # Configuration for CrateDB to send metrics to.
# [[outputs.cratedb]]
#   # A github.com/jackc/pgx connection string.
#   # See https://godoc.org/github.com/jackc/pgx#ParseDSN
#   url = "postgres://user:password@localhost/schema?sslmode=disable"
#   # Timeout for all CrateDB queries.
#   timeout = "5s"
#   # Name of the table to store metrics in.
#   table = "metrics"
#   # If true, and the metrics table does not exist, create it automatically.
#   table_create = true


# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
#   ## Datadog API key
#   apikey = "my-secret-key" # required.
#
#   # The base endpoint URL can optionally be specified but it defaults to:
#   #url = "https://app.datadoghq.com/api/v1/series"
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Send metrics to nowhere at all
# [[outputs.discard]]
#   # no configuration


# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
#   ## The full HTTP endpoint URL for your Elasticsearch instance
#   ## Multiple urls can be specified as part of the same cluster,
#   ## this means that only ONE of the urls will be written to each interval.
#   urls = [ "http://node1.es.example.com:9200" ] # required.
#   ## Elasticsearch client timeout, defaults to "5s" if not set.
#   timeout = "5s"
#   ## Set to true to ask Elasticsearch for a list of all cluster nodes,
#   ## so it is not necessary to list all nodes in the urls config option.
#   enable_sniffer = false
#   ## Set the interval to check if the Elasticsearch nodes are available
#   ## Setting to "0s" will disable the health check (not recommended in production)
#   health_check_interval = "10s"
#   ## HTTP basic authentication details
#   # username = "telegraf"
#   # password = "mypassword"
#
#   ## Index Config
#   ## The target index for metrics (Elasticsearch will create it if it does not exist).
#   ## You can use the date specifiers below to create indexes per time frame.
#   ## The metric timestamp will be used to decide the destination index name
#   # %Y - year (2016)
#   # %y - last two digits of year (00..99)
#   # %m - month (01..12)
#   # %d - day of month (e.g., 01)
#   # %H - hour (00..23)
#   # %V - week of the year (ISO week) (01..53)
#   ## Additionally, you can specify a tag name using the notation {{tag_name}}
#   ## which will be used as part of the index name. If the tag does not exist,
#   ## the default tag value will be used.
#   # index_name = "telegraf-{{host}}-%Y.%m.%d"
#   # default_tag_value = "none"
#   index_name = "telegraf-%Y.%m.%d" # required.
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Template Config
#   ## Set to true if you want telegraf to manage its index template.
#   ## If enabled it will create a recommended index template for telegraf indexes
#   manage_template = true
#   ## The template name used for telegraf indexes
#   template_name = "telegraf"
#   ## Set to true if you want telegraf to overwrite an existing template
#   overwrite_template = false


# # Send metrics to command as input over stdin
# [[outputs.exec]]
#   ## Command to ingest metrics via stdin.
#   command = ["tee", "-a", "/dev/null"]
#
#   ## Timeout for command to complete.
#   # timeout = "5s"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"


# # Send telegraf metrics to file(s)
# [[outputs.file]]
#   ## Files to write to, "stdout" is a specially handled file.
#   files = ["stdout", "/tmp/metrics.out"]
#
#   ## The file will be rotated after the time interval specified.  When set
#   ## to 0 no time based rotation is performed.
#   # rotation_interval = "0d"
#
#   ## The file will be rotated when it becomes larger than the specified
#   ## size.  When set to 0 no size based rotation is performed.
#   # rotation_max_size = "0MB"
#
#   ## Maximum number of rotated archives to keep, any older logs are deleted.
#   ## If set to -1, no archives are removed.
#   # rotation_max_archives = 5
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
#   ## TCP endpoint for your graphite instance.
#   ## If multiple endpoints are configured, output will be load balanced.
#   ## Only one of the endpoints will be written to with each iteration.
#   servers = ["localhost:2003"]
#   ## Prefix metrics name
#   prefix = ""
#   ## Graphite output template
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   template = "host.tags.measurement.field"
#
#   ## Enable Graphite tags support
#   # graphite_tag_support = false
#
#   ## timeout in seconds for the write connection to graphite
#   timeout = 2
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
#   ## UDP endpoint for your graylog instance.
#   servers = ["127.0.0.1:12201", "192.168.1.1:12201"]


# # Configurable HTTP health check resource based on metrics
# [[outputs.health]]
#   ## Address and port to listen on.
#   ##   ex: service_address = "http://localhost:8080"
#   ##       service_address = "unix:///var/run/telegraf-health.sock"
#   # service_address = "http://:8080"
#
#   ## The maximum duration for reading the entire request.
#   # read_timeout = "5s"
#   ## The maximum duration for writing the entire response.
#   # write_timeout = "5s"
#
#   ## Username and password to accept for HTTP basic authentication.
#   # basic_username = "user1"
#   # basic_password = "secret"
#
#   ## Allowed CA certificates for client certificates.
#   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## TLS server certificate and private key.
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#
#   ## One or more check sub-tables should be defined; it is also recommended to
#   ## use metric filtering to limit the metrics that flow into this output.
#   ##
#   ## When using the default buffer sizes, this example will fail when the
#   ## metric buffer is half full.
#   ##
#   ## namepass = ["internal_write"]
#   ## tagpass = { output = ["influxdb"] }
#   ##
#   ## [[outputs.health.compares]]
#   ##   field = "buffer_size"
#   ##   lt = 5000.0
#   ##
#   ## [[outputs.health.contains]]
#   ##   field = "buffer_size"


# A plugin that can transmit metrics over HTTP
[[outputs.http]]
#   ## URL is the address to send metrics to
  url = "http://172.15.1.8:10202/telegraf"
#
#   ## Timeout for HTTP message
#   # timeout = "5s"
#
#   ## HTTP method, one of: "POST" or "PUT"
#   # method = "POST"
#
#   ## HTTP Basic Auth credentials
#   # username = "username"
#   # password = "pa$$word"
#
#   ## OAuth2 Client Credentials Grant
#   # client_id = "clientid"
#   # client_secret = "secret"
#   # token_url = "https://identityprovider/oauth2/v1/token"
#   # scopes = ["urn:opc:idm:__myscopes__"]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "json"
  json_timestamp_units = "1ms"
#
#   ## HTTP Content-Encoding for write request body, can be set to "gzip" to
#   ## compress body or "identity" to apply no encoding.
#   # content_encoding = "identity"
#
#   ## Additional HTTP headers
#   # [outputs.http.headers]
#   #   # Should be set manually to "application/json" for json data_format
#   #   Content-Type = "text/plain; charset=utf-8"
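#
#   ## Since data_format = "json" is enabled above, a matching header would be
#   ## (shown commented out here as an illustration):
#   # [outputs.http.headers]
#   #   Content-Type = "application/json"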


# # Configuration for sending metrics to InfluxDB
# [[outputs.influxdb_v2]]
#   ## The URLs of the InfluxDB cluster nodes.
#   ##
#   ## Multiple URLs can be specified for a single cluster, only ONE of the
#   ## urls will be written to each interval.
#   urls = ["http://127.0.0.1:9999"]
#
#   ## Token for authentication.
#   token = ""
#
#   ## Organization is the name of the organization you wish to write to; must exist.
#   organization = ""
#
#   ## Destination bucket to write into.
#   bucket = ""
#
#   ## The value of this tag will be used to determine the bucket.  If this
#   ## tag is not set the 'bucket' option is used as the default.
#   # bucket_tag = ""
#
#   ## If true, the bucket tag will not be added to the metric.
#   # exclude_bucket_tag = false
#
#   ## Timeout for HTTP messages.
#   # timeout = "5s"
#
#   ## Additional HTTP headers
#   # http_headers = {"X-Special-Header" = "Special-Value"}
#
#   ## HTTP Proxy override. If unset, the standard proxy environment variables
#   ## are consulted to determine which proxy, if any, should be used.
#   # http_proxy = "http://corporate.proxy:3128"
#
#   ## HTTP User-Agent
#   # user_agent = "telegraf"
#
#   ## Content-Encoding for write request body, can be set to "gzip" to
#   ## compress body or "identity" to apply no encoding.
#   # content_encoding = "gzip"
#
#   ## Enable or disable uint support for writing uints to InfluxDB 2.0.
#   # influx_uint_support = false
#
#   ## Optional TLS Config for use on HTTP connections.
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
#   ## Project API Token (required)
#   api_token = "API Token" # required
#   ## Prefix the metrics with a given name
#   prefix = ""
#   ## Stats output template (Graphite formatting)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   template = "host.tags.measurement.field"
#   ## Timeout in seconds to connect
#   timeout = "2s"
#   ## Display communication to Instrumental
#   debug = false


# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
#   ## URLs of kafka brokers
#   brokers = ["localhost:9092"]
#   ## Kafka topic for producer messages
#   topic = "telegraf"
#
#   ## Optional Client id
#   # client_id = "Telegraf"
#
#   ## Set the minimal supported Kafka version.  Setting this enables the use of new
#   ## Kafka features and APIs.  Of particular interest, lz4 compression
#   ## requires at least version 0.10.0.0.
#   ##   ex: version = "1.1.0"
#   # version = ""
#
#   ## Optional topic suffix configuration.
#   ## If the section is omitted, no suffix is used.
#   ## Following topic suffix methods are supported:
#   ##   measurement - suffix equals to separator + measurement's name
#   ##   tags        - suffix equals to separator + specified tags' values
#   ##                 interleaved with separator
#
#   ## Suffix equals to "_" + measurement name
#   # [outputs.kafka.topic_suffix]
#   #   method = "measurement"
#   #   separator = "_"
#
#   ## Suffix equals to "__" + measurement's "foo" tag value.
#   ##   If there's no such tag, suffix equals to an empty string
#   # [outputs.kafka.topic_suffix]
#   #   method = "tags"
#   #   keys = ["foo"]
#   #   separator = "__"
#
#   ## Suffix equals to "_" + measurement's "foo" and "bar"
#   ##   tag values, separated by "_". If there are no such tags,
#   ##   their values are treated as empty strings.
#   # [outputs.kafka.topic_suffix]
#   #   method = "tags"
#   #   keys = ["foo", "bar"]
#   #   separator = "_"
#
#   ## Telegraf tag to use as a routing key
#   ##  ie, if this tag exists, its value will be used as the routing key
#   routing_tag = "host"
#
#   ## Static routing key.  Used when no routing_tag is set or as a fallback
#   ## when the tag specified in routing tag is not found.  If set to "random",
#   ## a random value will be generated for each message.
#   ##   ex: routing_key = "random"
#   ##       routing_key = "telegraf"
#   # routing_key = ""
#
#   ## CompressionCodec represents the various compression codecs recognized by
#   ## Kafka in messages.
#   ##  0 : No compression
#   ##  1 : Gzip compression
#   ##  2 : Snappy compression
#   ##  3 : LZ4 compression
#   # compression_codec = 0
#
#   ##  RequiredAcks is used in Produce Requests to tell the broker how many
#   ##  replica acknowledgements it must see before responding
#   ##   0 : the producer never waits for an acknowledgement from the broker.
#   ##       This option provides the lowest latency but the weakest durability
#   ##       guarantees (some data will be lost when a server fails).
#   ##   1 : the producer gets an acknowledgement after the leader replica has
#   ##       received the data. This option provides better durability as the
#   ##       client waits until the server acknowledges the request as successful
#   ##       (only messages that were written to the now-dead leader but not yet
#   ##       replicated will be lost).
#   ##   -1: the producer gets an acknowledgement after all in-sync replicas have
#   ##       received the data. This option provides the best durability; no
#   ##       messages will be lost as long as at least one in-sync replica
#   ##       remains.
#   # required_acks = -1
#
#   ## The maximum number of times to retry sending a metric before failing
#   ## until the next flush.
#   # max_retry = 3
#
#   ## The maximum permitted size of a message. Should be set equal to or
#   ## smaller than the broker's 'message.max.bytes'.
#   # max_message_bytes = 1000000
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional SASL Config
#   # sasl_username = "kafka"
#   # sasl_password = "secret"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"


# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
#   ## Amazon REGION of kinesis endpoint.
#   region = "ap-southeast-2"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Endpoint to make request against, the correct endpoint is automatically
#   ## determined and this option should only be set if you wish to override the
#   ## default.
#   ##   ex: endpoint_url = "http://localhost:8000"
#   # endpoint_url = ""
#
#   ## Kinesis StreamName must exist prior to starting telegraf.
#   streamname = "StreamName"
#   ## DEPRECATED: PartitionKey used for sharding data.
#   partitionkey = "PartitionKey"
#   ## DEPRECATED: If set, the partition key will be a random UUID on every put.
#   ## This allows for scaling across multiple shards in a stream.
#   ## This will cause issues with ordering.
#   use_random_partitionkey = false
#   ## The partition key can be calculated using one of several methods:
#   ##
#   ## Use a static value for all writes:
#   #  [outputs.kinesis.partition]
#   #    method = "static"
#   #    key = "howdy"
#   #
#   ## Use a random partition key on each write:
#   #  [outputs.kinesis.partition]
#   #    method = "random"
#   #
#   ## Use the measurement name as the partition key:
#   #  [outputs.kinesis.partition]
#   #    method = "measurement"
#   #
#   ## Use the value of a tag for all writes; if the tag is not set, the
#   ## 'default' value below is used. If no default is given, "telegraf" is used.
#   #  [outputs.kinesis.partition]
#   #    method = "tag"
#   #    key = "host"
#   #    default = "mykey"
#
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"
#
#   ## debug will show upstream aws messages.
#   debug = false


# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
#   ## Librato API Docs
#   ## http://dev.librato.com/v1/metrics-authentication
#   ## Librato API user
#   api_user = "telegraf@influxdb.com" # required.
#   ## Librato API token
#   api_token = "my-secret-token" # required.
#   ## Debug
#   # debug = false
#   ## Connection timeout.
#   # timeout = "5s"
#   ## Output source Template (same as graphite buckets)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   ## This template is used in librato's source (not metric's name)
#   template = "host"
#


# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
#   servers = ["localhost:1883"] # required.
#
#   ## MQTT outputs send metrics to this topic format
#   ##    "<topic_prefix>/<hostname>/<pluginname>/"
#   ##   ex: prefix/web01.example.com/mem
#   topic_prefix = "telegraf"
#
#   ## QoS policy for messages
#   ##   0 = at most once
#   ##   1 = at least once
#   ##   2 = exactly once
#   # qos = 2
#
#   ## Username and password to connect to the MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## client ID, if not set a random ID is generated
#   # client_id = ""
#
#   ## Timeout for write operations. default: 5s
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## When true, metrics will be sent in one MQTT message per flush.  Otherwise,
#   ## metrics are written one metric per MQTT message.
#   # batch = false
#
#   ## When true, metrics will have the RETAIN flag set, making the broker cache
#   ## entries until a client actually reads them
#   # retain = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NATS
# [[outputs.nats]]
#   ## URLs of NATS servers
#   servers = ["nats://localhost:4222"]
#   ## Optional credentials
#   # username = ""
#   # password = ""
#   ## NATS subject for producer messages
#   subject = "telegraf"
#
#   ## Use Transport Layer Security
#   # secure = false
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
#   ## Location of nsqd instance listening on TCP
#   server = "localhost:4150"
#   ## NSQ topic for producer messages
#   topic = "telegraf"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
#   ## prefix for metrics keys
#   prefix = "my.specific.prefix."
#
#   ## DNS name of the OpenTSDB server
#   ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
#   ## telnet API. "http://opentsdb.example.com" will use the Http API.
#   host = "opentsdb.example.com"
#
#   ## Port of the OpenTSDB server
#   port = 4242
#
#   ## Number of data points to send to OpenTSDB in HTTP requests.
#   ## Not used with telnet API.
#   http_batch_size = 50
#
#   ## URI path for HTTP requests to OpenTSDB.
#   ## Used in cases where OpenTSDB is located behind a reverse proxy.
#   http_path = "/api/put"
#
#   ## Debug true - Prints OpenTSDB communication
#   debug = false
#
#   ## Separator separates measurement name from field
#   separator = "_"


# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
#   ## Address to listen on
#   listen = ":9273"
#
#   ## Use HTTP Basic Authentication.
#   # basic_username = "Foo"
#   # basic_password = "Bar"
#
#   ## If set, the IP Ranges which are allowed to access metrics.
#   ##   ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
#   # ip_range = []
#
#   ## Path to publish the metrics on.
#   # path = "/metrics"
#
#   ## Expiration interval for each metric. 0 == no expiration
#   # expiration_interval = "60s"
#
#   ## Collectors to exclude, valid entries are "gocollector" and "process".
#   ## If unset, both collectors are enabled.
#   # collectors_exclude = ["gocollector", "process"]
#
#   ## Send string metrics as Prometheus labels.
#   ## Unless set to false all string metrics will be sent as labels.
#   # string_as_label = true
#
#   ## If set, enable TLS with the given certificate.
#   # tls_cert = "/etc/ssl/telegraf.crt"
#   # tls_key = "/etc/ssl/telegraf.key"
#
#   ## Set one or more allowed client CA certificate file names to
#   ## enable mutually authenticated TLS connections
#   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Export metric collection time.
#   # export_timestamp = false


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
#   ## The full TCP or UDP URL of the Riemann server
#   url = "tcp://localhost:5555"
#
#   ## Riemann event TTL, floating-point time in seconds.
#   ## Defines how long an event is considered valid in Riemann
#   # ttl = 30.0
#
#   ## Separator to use between measurement and field name in Riemann service name
#   ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
#   separator = "/"
#
#   ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
#   # measurement_as_attribute = false
#
#   ## Send string metrics as Riemann event states.
#   ## Unless enabled all string metrics will be ignored
#   # string_as_state = false
#
#   ## A list of tag keys whose values get sent as Riemann tags.
#   ## If empty, all Telegraf tag values will be sent as tags
#   # tag_keys = ["telegraf","custom_tag"]
#
#   ## Additional Riemann tags to send.
#   # tags = ["telegraf-output"]
#
#   ## Description for Riemann event
#   # description_text = "metrics collected from telegraf"
#
#   ## Riemann client write timeout, defaults to "5s" if not set.
#   # timeout = "5s"


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann_legacy]]
#   ## URL of server
#   url = "localhost:5555"
#   ## transport protocol to use either tcp or udp
#   transport = "tcp"
#   ## separator to use between input name and field name in Riemann service name
#   separator = " "


# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
#   ## URL to connect to
#   # address = "tcp://127.0.0.1:8094"
#   # address = "tcp://example.com:http"
#   # address = "tcp4://127.0.0.1:8094"
#   # address = "tcp6://127.0.0.1:8094"
#   # address = "tcp6://[2001:db8::1]:8094"
#   # address = "udp://127.0.0.1:8094"
#   # address = "udp4://127.0.0.1:8094"
#   # address = "udp6://127.0.0.1:8094"
#   # address = "unix:///tmp/telegraf.sock"
#   # address = "unixgram:///tmp/telegraf.sock"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## Data format to generate.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"


# # Configuration for Google Cloud Stackdriver to send metrics to
# [[outputs.stackdriver]]
#   ## GCP Project
#   project = "erudite-bloom-151019"
#
#   ## The namespace for the metric descriptor
#   namespace = "telegraf"
#
#   ## Custom resource type
#   # resource_type = "generic_node"
#
#   ## Additional resource labels
#   # [outputs.stackdriver.resource_labels]
#   #   node_id = "$HOSTNAME"
#   #   namespace = "myapp"
#   #   location = "eu-north0"


# # Configuration for Syslog server to send metrics to
# [[outputs.syslog]]
#   ## URL to connect to
#   ## ex: address = "tcp://127.0.0.1:8094"
#   ## ex: address = "tcp4://127.0.0.1:8094"
#   ## ex: address = "tcp6://127.0.0.1:8094"
#   ## ex: address = "tcp6://[2001:db8::1]:8094"
#   ## ex: address = "udp://127.0.0.1:8094"
#   ## ex: address = "udp4://127.0.0.1:8094"
#   ## ex: address = "udp6://127.0.0.1:8094"
#   address = "tcp://127.0.0.1:8094"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## The framing technique expected for transported messages
#   ## (default = "octet-counting").  Messages may arrive using either the
#   ## octet-counting framing technique (RFC5425#section-4.3.1, RFC6587#section-3.4.1)
#   ## or the non-transparent framing technique (RFC6587#section-3.4.2).  Must
#   ## be one of "octet-counting" or "non-transparent".
#   # framing = "octet-counting"
#
#   ## The trailer to be expected in case of non-transparent framing (default = "LF").
#   ## Must be one of "LF", or "NUL".
#   # trailer = "LF"
#
#   ## SD-PARAMs settings
#   ## Syslog messages can contain key/value pairs within zero or more
#   ## structured data sections.  For each unrecognised metric tag/field an
#   ## SD-PARAM is created.
#   ##
#   ## Example:
#   ##   [[outputs.syslog]]
#   ##     sdparam_separator = "_"
#   ##     default_sdid = "default@32473"
#   ##     sdids = ["foo@123", "bar@456"]
#   ##
#   ##   input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
#   ##   output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
#
#   ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
#   # sdparam_separator = "_"
#
#   ## Default sdid used for tags/fields that don't contain a prefix defined in
#   ## the explicit sdids setting below.  If no default is specified, no
#   ## SD-PARAMs will be used for unrecognised fields.
#   # default_sdid = "default@32473"
#
#   ## List of explicit prefixes to extract from tag/field keys and use as the
#   ## SDID, if they match (see above example for more details):
#   # sdids = ["foo@123", "bar@456"]
#
#   ## Default severity value. Severity and Facility are used to calculate the
#   ## message PRI value (RFC5424#section-6.2.1).  Used when no metric field
#   ## with key "severity_code" is defined.  If unset, 5 (notice) is the default
#   # default_severity_code = 5
#
#   ## Default facility value. Facility and Severity are used to calculate the
#   ## message PRI value (RFC5424#section-6.2.1).  Used when no metric field with
#   ## key "facility_code" is defined.  If unset, 1 (user-level) is the default
#   # default_facility_code = 1
#
#   ## Default APP-NAME value (RFC5424#section-6.2.5)
#   ## Used when no metric tag with key "appname" is defined.
#   ## If unset, "Telegraf" is the default
#   # default_appname = "Telegraf"


# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
#   ## URL for Wavefront Direct Ingestion, or the HTTP URL of a Wavefront Proxy
#   ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878
#   url = "https://metrics.wavefront.com"
#
#   ## Authentication Token for Wavefront. Only required if using Direct Ingestion
#   #token = "DUMMY_TOKEN"
#
#   ## DNS name of the wavefront proxy server. Do not use if url is specified
#   #host = "wavefront.example.com"
#
#   ## Port that the Wavefront proxy server listens on. Do not use if url is specified
#   #port = 2878
#
#   ## prefix for metrics keys
#   #prefix = "my.specific.prefix."
#
#   ## whether to use "value" for name of simple fields. default is false
#   #simple_fields = false
#
#   ## character to use between metric and field name.  default is . (dot)
#   #metric_separator = "."
#
#   ## Convert metric name paths to use metricSeparator character
#   ## When true will convert all _ (underscore) characters in final metric name. default is true
#   #convert_paths = true
#
#   ## Use Strict rules to sanitize metric and tag names from invalid characters
#   ## When enabled, forward slash (/) and comma (,) will be accepted
#   #use_strict = false
#
#   ## Use Regex to sanitize metric and tag names from invalid characters
#   ## Regex is more thorough, but significantly slower. default is false
#   #use_regex = false
#
#   ## point tags to use as the source name for Wavefront (if none found, host will be used)
#   #source_override = ["hostname", "address", "agent_host", "node_host"]
#
#   ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
#   #convert_bool = true
#
#   ## Define a mapping, namespaced by metric prefix, from string values to numeric values
#   ##   deprecated in 1.9; use the enum processor plugin
#   #[[outputs.wavefront.string_to_number.elasticsearch]]
#   #  green = 1.0
#   #  yellow = 0.5
#   #  red = 0.0


###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################


# # Convert values to another metric value type
# [[processors.converter]]
#   ## Tags to convert
#   ##
#   ## The table key determines the target type, and the array of key-values
#   ## select the keys to convert.  The array may contain globs.
#   ##   <target-type> = [<tag-key>...]
#   [processors.converter.tags]
#     string = []
#     integer = []
#     unsigned = []
#     boolean = []
#     float = []
#
#   ## Fields to convert
#   ##
#   ## The table key determines the target type, and the array of key-values
#   ## select the keys to convert.  The array may contain globs.
#   ##   <target-type> = [<field-key>...]
#   [processors.converter.fields]
#     tag = []
#     string = []
#     integer = []
#     unsigned = []
#     boolean = []
#     float = []


# # Dates measurements, tags, and fields that pass through this filter.
# [[processors.date]]
#   ## New tag to create
#   tag_key = "month"
#
#   ## Date format string, must be a representation of the Go "reference time"
#   ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
#   date_format = "Jan"


# # Map enum values according to given table.
# [[processors.enum]]
#   [[processors.enum.mapping]]
#     ## Name of the field to map
#     field = "status"
#
#     ## Name of the tag to map
#     # tag = "status"
#
#     ## Destination tag or field to be used for the mapped value.  By default the
#     ## source tag or field is used, overwriting the original value.
#     dest = "status_code"
#
#     ## Default value to be used for all values not contained in the mapping
#     ## table.  When unset, the unmodified value for the field will be used if no
#     ## match is found.
#     # default = 0
#
#     ## Table of mappings
#     [processors.enum.mapping.value_mappings]
#       green = 1
#       amber = 2
#       red = 3
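#
#     ## Example (illustrative, using the mapping above): a metric with field
#     ## status="green" gains the field status_code=1; an unmapped value such
#     ## as "blue" keeps its original value unless 'default' is set.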


# # Apply metric modifications using override semantics.
# [[processors.override]]
#   ## All modifications on inputs and aggregators can be overridden:
#   # name_override = "new_name"
#   # name_prefix = "new_name_prefix"
#   # name_suffix = "new_name_suffix"
#
#   ## Tags to be added (all values must be strings)
#   # [processors.override.tags]
#   #   additional_tag = "tag_value"


# # Parse a value in a specified field/tag(s) and add the result in a new metric
# [[processors.parser]]
#   ## The name of the fields whose value will be parsed.
#   parse_fields = []
#
#   ## If true, incoming metrics are not emitted.
#   drop_original = false
#
#   ## If set to override, emitted metrics will be merged by overriding the
#   ## original metric using the newly parsed metrics.
#   merge = "override"
#
#   ## The dataformat to be read from files
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Rotate a single valued metric into a multi field metric
# [[processors.pivot]]
#   ## Tag to use for naming the new field.
#   tag_key = "name"
#   ## Field to use as the value of the new field.
#   value_key = "value"


# # Print all metrics that pass through this filter.
# [[processors.printer]]


# # Transforms tag and field values with regex pattern
# [[processors.regex]]
#   ## Tag and field conversions defined in a separate sub-tables
#   # [[processors.regex.tags]]
#   #   ## Tag to change
#   #   key = "resp_code"
#   #   ## Regular expression to match on a tag value
#   #   pattern = "^(\\d)\\d\\d$"
#   #   ## Matches of the pattern will be replaced with this string.  Use ${1}
#   #   ## notation to use the text of the first submatch.
#   #   replacement = "${1}xx"
#
#   # [[processors.regex.fields]]
#   #   ## Field to change
#   #   key = "request"
#   #   ## All the power of the Go regular expressions available here
#   #   ## For example, named subgroups
#   #   pattern = "^/api(?P<method>/[\\w/]+)\\S*"
#   #   replacement = "${method}"
#   #   ## If result_key is present, a new field will be created
#   #   ## instead of changing existing field
#   #   result_key = "method"
#
#   ## Multiple conversions may be applied for one field sequentially
#   ## Let's extract one more value
#   # [[processors.regex.fields]]
#   #   key = "request"
#   #   pattern = ".*category=(\\w+).*"
#   #   replacement = "${1}"
#   #   result_key = "search_category"


# # Rename measurements, tags, and fields that pass through this filter.
# [[processors.rename]]


# # Perform string processing on tags, fields, and measurements
# [[processors.strings]]
#   ## Convert a tag value to uppercase
#   # [[processors.strings.uppercase]]
#   #   tag = "method"
#
#   ## Convert a field value to lowercase and store in a new field
#   # [[processors.strings.lowercase]]
#   #   field = "uri_stem"
#   #   dest = "uri_stem_normalised"
#
#   ## Trim leading and trailing whitespace using the default cutset
#   # [[processors.strings.trim]]
#   #   field = "message"
#
#   ## Trim leading characters in cutset
#   # [[processors.strings.trim_left]]
#   #   field = "message"
#   #   cutset = "\t"
#
#   ## Trim trailing characters in cutset
#   # [[processors.strings.trim_right]]
#   #   field = "message"
#   #   cutset = "\r\n"
#
#   ## Trim the given prefix from the field
#   # [[processors.strings.trim_prefix]]
#   #   field = "my_value"
#   #   prefix = "my_"
#
#   ## Trim the given suffix from the field
#   # [[processors.strings.trim_suffix]]
#   #   field = "read_count"
#   #   suffix = "_count"
#
#   ## Replace all non-overlapping instances of old with new
#   # [[processors.strings.replace]]
#   #   measurement = "*"
#   #   old = ":"
#   #   new = "_"
#
#   ## Trims strings based on width
#   # [[processors.strings.left]]
#   #   field = "message"
#   #   width = 10


# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
# [[processors.tag_limit]]
#   ## Maximum number of tags to preserve
#   limit = 10
#
#   ## List of tags to preferentially preserve
#   keep = ["foo", "bar", "baz"]


# # Keep only the metrics that belong to the top k series over a period of time.
# [[processors.topk]]
#   ## How many seconds between aggregations
#   # period = 10
#
#   ## How many top metrics to return
#   # k = 10
#
#   ## Over which tags the aggregation should be done. Globs can be specified, in
#   ## which case any tag matching the glob will be aggregated over. If set to an
#   ## empty list, no aggregation over tags is done
#   # group_by = ['*']
#
#   ## Over which fields the top k are calculated
#   # fields = ["value"]
#
#   ## What aggregation to use. Options: sum, mean, min, max
#   # aggregation = "mean"
#
#   ## Instead of the top k largest metrics, return the bottom k lowest metrics
#   # bottomk = false
#
#   ## The plugin assigns each metric a GroupBy tag generated from its name and
#   ## tags. If this setting is different from "", the plugin will add a
#   ## tag (whose name will be the value of this setting) to each metric with
#   ## the value of the calculated GroupBy tag. Useful for debugging
#   # add_groupby_tag = ""
#
#   ## These settings provide a way to know the position of each metric in
#   ## the top k. The 'add_rank_fields' setting lets you specify for which
#   ## fields the position is required. If the list is non-empty, then a field
#   ## will be added to each and every metric for each string present in this
#   ## setting. This field will contain the ranking of the group that
#   ## the metric belonged to when aggregated over that field.
#   ## The name of the field will be set to the name of the aggregation field,
#   ## suffixed with the string '_topk_rank'
#   # add_rank_fields = []
#
#   ## These settings provide a way to know what values the plugin is generating
#   ## when aggregating metrics. The 'add_aggregate_fields' setting lets you
#   ## specify for which fields the final aggregation value is required. If the
#   ## list is non-empty, then a field will be added to each and every metric for
#   ## each field present in this setting. This field will contain
#   ## the computed aggregation for the group that the metric belonged to when
#   ## aggregated over that field.
#   ## The name of the field will be set to the name of the aggregation field,
#   ## suffixed with the string '_topk_aggregate'
#   # add_aggregate_fields = []


# # Rotate multi field metric into several single field metrics
# [[processors.unpivot]]
#   ## Tag to use for the name.
#   tag_key = "name"
#   ## Field to use for the name of the value.
#   value_key = "value"


###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################


# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## Configures which basic stats to push as fields
#   # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
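#
#   ## Example (illustrative): with stats = ["min", "max"], an input field
#   ## "load1" produces the aggregate fields "load1_min" and "load1_max"
#   ## each period.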


# # Report the final metric of a series
# [[aggregators.final]]
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## How long a series must go without updates before it is considered final.
#   series_timeout = "5m"


# # Create aggregate histograms.
# [[aggregators.histogram]]
#   ## The period in which to flush the aggregator.
#   period = "30s"
#
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## If true, the histogram will be reset on flush instead
#   ## of accumulating the results.
#   reset = false
#
#   ## Example config that aggregates all fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
#   #   ## The name of metric.
#   #   measurement_name = "cpu"
#
#   ## Example config that aggregates only specific fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
#   #   ## The name of metric.
#   #   measurement_name = "diskio"
#   #   ## The concrete fields of metric
#   #   fields = ["io_time", "read_time", "write_time"]


# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false


# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#   ## The fields for which the values will be counted
#   fields = []
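#
#   ## Example (illustrative): with fields = ["status"], a field "status"
#   ## that took the values 200 and 404 during the period produces the
#   ## counter fields "status_200" and "status_404".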


###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################


# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## If true, collect raw CPU time metrics.
  collect_cpu_time = false
  ## If true, compute and report the sum of all non-idle CPU states.
  report_active = false


# Read metrics about disk usage by mount point
[[inputs.disk]]
  ## By default stats will be gathered for all mount points.
  ## Set mount_points will restrict the stats to only the specified mount points.
  # mount_points = ["/"]

  ## Ignore mount points by filesystem type.
  ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]


# Read metrics about disk IO by device
[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  # devices = ["sda", "sdb", "vd*"]
  ## Uncomment the following line if you need disk serial numbers.
  # skip_serial_number = false
  #
  ## On systems which support it, device metadata can be added in the form of
  ## tags.
  ## Currently only Linux is supported via udev properties. You can view
  ## available properties for a device by running:
  ## 'udevadm info -q property -n /dev/sda'
  ## Note: Most, but not all, udev properties can be accessed this way. Properties
  ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  #
  ## Using the same metadata source as device_tags, you can also customize the
  ## name of the device via templates.
  ## The 'name_templates' parameter is a list of templates to try and apply to
  ## the device. The template may contain variables in the form of '$PROPERTY' or
  ## '${PROPERTY}'. The first template which does not contain any variables not
  ## present for the device is used as the device name tag.
  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
  ## the near-meaningless DM-0 name.
  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
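  #
  ## Example (illustrative): with the template "$DM_VG_NAME/$DM_LV_NAME", a
  ## dm device whose udev properties are DM_VG_NAME=vg0 and DM_LV_NAME=home
  ## would be reported with the name tag "vg0/home".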


# Get kernel statistics from /proc/stat
[[inputs.kernel]]
  # no configuration


# Read metrics about memory usage
[[inputs.mem]]
  # no configuration


# Get the number of processes and group them by status
[[inputs.processes]]
  # no configuration


# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration


# Read metrics about system load & uptime
[[inputs.system]]
  ## Uncomment to remove deprecated metrics.
  # fielddrop = ["uptime_format"]


# # Gather ActiveMQ metrics
# [[inputs.activemq]]
#   ## ActiveMQ WebConsole URL
#   url = "http://127.0.0.1:8161"
#
#   ## Required ActiveMQ Endpoint
#   ##   deprecated in 1.11; use the url option
#   # server = "127.0.0.1"
#   # port = 8161
#
#   ## Credentials for basic HTTP authentication
#   # username = "admin"
#   # password = "admin"
#
#   ## Required ActiveMQ webadmin root path
#   # webadmin = "admin"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read stats from aerospike server(s)
# [[inputs.aerospike]]
#   ## Aerospike servers to connect to (with port)
#   ## This plugin will query all namespaces the aerospike
#   ## server has configured and get stats for them.
#   servers = ["localhost:3000"]
#
#   # username = "telegraf"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # enable_tls = false
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## If false, skip chain & host verification
#   # insecure_skip_verify = true


# # Read Apache status information (mod_status)
# [[inputs.apache]]
#   ## An array of URLs to gather from, must be directed at the machine
#   ## readable version of the mod_status page including the auto query string.
#   ## Default is "http://localhost/server-status?auto".
#   urls = ["http://localhost/server-status?auto"]
#
#   ## Credentials for basic HTTP authentication.
#   # username = "myuser"
#   # password = "mypassword"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Monitor APC UPSes connected to apcupsd
# [[inputs.apcupsd]]
#   # A list of running apcupsd servers to connect to.
#   # If not provided, will default to tcp://127.0.0.1:3551
#   servers = ["tcp://127.0.0.1:3551"]
#
#   ## Timeout for dialing server.
#   timeout = "5s"


# # Gather metrics from Apache Aurora schedulers
# [[inputs.aurora]]
#   ## Schedulers are the base addresses of your Aurora Schedulers
#   schedulers = ["http://127.0.0.1:8081"]
#
#   ## Set of role types to collect metrics from.
#   ##
#   ## The scheduler roles are checked each interval by contacting the
#   ## scheduler nodes; zookeeper is not contacted.
#   # roles = ["leader", "follower"]
#
#   ## Timeout is the max time for total network operations.
#   # timeout = "5s"
#
#   ## Username and password are sent using HTTP Basic Auth.
#   # username = "username"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
#   ## Bcache sets path
#   ## If not specified, then default is:
#   bcachePath = "/sys/fs/bcache"
#
#   ## By default, telegraf gathers stats for all bcache devices
#   ## Setting devices will restrict the stats to the specified
#   ## bcache devices.
#   bcacheDevs = ["bcache0"]


# # Collects Beanstalkd server and tubes stats
# [[inputs.beanstalkd]]
#   ## Server to collect data from
#   server = "localhost:11300"
#
#   ## List of tubes to gather stats about.
#   ## If no tubes are specified, then data is gathered for each tube on the server reported by the list-tubes command
#   tubes = ["notifications"]


# # Read BIND nameserver XML statistics
# [[inputs.bind]]
#   ## An array of BIND XML statistics URI to gather stats.
#   ## Default is "http://localhost:8053/xml/v3".
#   # urls = ["http://localhost:8053/xml/v3"]
#   # gather_memory_contexts = false
#   # gather_views = false


# # Collect bond interface status, slaves statuses and failures count
# [[inputs.bond]]
#   ## Sets 'proc' directory path
#   ## If not specified, then default is /proc
#   # host_proc = "/proc"
#
#   ## By default, telegraf gathers stats for all bond interfaces
#   ## Setting interfaces will restrict the stats to the specified
#   ## bond interfaces.
#   # bond_interfaces = ["bond0"]


# # Collect Kafka topics and consumers status from Burrow HTTP API.
# [[inputs.burrow]]
#   ## Burrow API endpoints in format "schema://host:port".
#   ## Default is "http://localhost:8000".
#   servers = ["http://localhost:8000"]
#
#   ## Override Burrow API prefix.
#   ## Useful when Burrow is behind reverse-proxy.
#   # api_prefix = "/v3/kafka"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Limit per-server concurrent connections.
#   ## Useful in case of large number of topics or consumer groups.
#   # concurrent_connections = 20
#
#   ## Filter clusters, default is no filtering.
#   ## Values can be specified as glob patterns.
#   # clusters_include = []
#   # clusters_exclude = []
#
#   ## Filter consumer groups, default is no filtering.
#   ## Values can be specified as glob patterns.
#   # groups_include = []
#   # groups_exclude = []
#
#   ## Filter topics, default is no filtering.
#   ## Values can be specified as glob patterns.
#   # topics_include = []
#   # topics_exclude = []
#
#   ## Credentials for basic HTTP authentication.
#   # username = ""
#   # password = ""
#
#   ## Optional SSL config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   # insecure_skip_verify = false


# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
#   ## This is the recommended interval to poll.  Too frequent and you will lose
#   ## data points due to timeouts during rebalancing and recovery
#   interval = '1m'
#
#   ## All configuration values are optional, defaults are shown below
#
#   ## location of ceph binary
#   ceph_binary = "/usr/bin/ceph"
#
#   ## directory in which to look for socket files
#   socket_dir = "/var/run/ceph"
#
#   ## prefix of MON and OSD socket files, used to determine socket type
#   mon_prefix = "ceph-mon"
#   osd_prefix = "ceph-osd"
#
#   ## suffix used to identify socket files
#   socket_suffix = "asok"
#
#   ## Ceph user to authenticate as
#   ceph_user = "client.admin"
#
#   ## Ceph configuration to use to locate the cluster
#   ceph_config = "/etc/ceph/ceph.conf"
#
#   ## Whether to gather statistics via the admin socket
#   gather_admin_socket_stats = true
#
#   ## Whether to gather statistics via ceph commands
#   gather_cluster_stats = false


# # Read specific statistics per cgroup
# [[inputs.cgroup]]
#   ## Directories in which to look for files, globs are supported.
#   ## Consider restricting paths to the set of cgroups you really
#   ## want to monitor if you have a large number of cgroups, to avoid
#   ## any cardinality issues.
#   # paths = [
#   #   "/cgroup/memory",
#   #   "/cgroup/memory/child1",
#   #   "/cgroup/memory/child2/*",
#   # ]
#   ## cgroup stat fields, as file names, globs are supported.
#   ## these file names are appended to each path from above.
#   # files = ["memory.*usage*", "memory.limit_in_bytes"]


# # Get standard chrony metrics, requires chronyc executable.
# [[inputs.chrony]]
#   ## If true, chronyc tries to perform a DNS lookup for the time server.
#   # dns_lookup = false


# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
#   ## Amazon Region
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   # access_key = ""
#   # secret_key = ""
#   # token = ""
#   # role_arn = ""
#   # profile = ""
#   # shared_credential_file = ""
#
#   ## Endpoint to make request against, the correct endpoint is automatically
#   ## determined and this option should only be set if you wish to override the
#   ## default.
#   ##   ex: endpoint_url = "http://localhost:8000"
#   # endpoint_url = ""
#
#   # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
#   # metrics are made available to the 1 minute period. Some are collected at
#   # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
#   # Note that if a period is configured that is smaller than the minimum for a
#   # particular metric, that metric will not be returned by the Cloudwatch API
#   # and will not be collected by Telegraf.
#   #
#   ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
#   period = "5m"
#
#   ## Collection Delay (required - must account for metrics availability via CloudWatch API)
#   delay = "5m"
#
#   ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
#   ## gaps or overlap in pulled data
#   interval = "5m"
#
#   ## Configure the TTL for the internal cache of metrics.
#   # cache_ttl = "1h"
#
#   ## Metric Statistic Namespace (required)
#   namespace = "AWS/ELB"
#
#   ## Maximum requests per second. Note that the global default AWS rate limit is
#   ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
#   ## maximum of 50.
#   ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
#   # ratelimit = 25
#
#   ## Namespace-wide statistic filters. These allow fewer queries to be made to
#   ## cloudwatch.
#   # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
#   # statistic_exclude = []
#
#   ## Metrics to Pull
#   ## Defaults to all Metrics in Namespace if nothing is provided
#   ## Refreshes Namespace available metrics every 1h
#   #[[inputs.cloudwatch.metrics]]
#   #  names = ["Latency", "RequestCount"]
#   #
#   #  ## Statistic filters for Metric.  These allow for retrieving specific
#   #  ## statistics for an individual metric.
#   #  # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
#   #  # statistic_exclude = []
#   #
#   #  ## Dimension filters for Metric.  All dimensions defined for the metric names
#   #  ## must be specified in order to retrieve the metric statistics.
#   #  [[inputs.cloudwatch.metrics.dimensions]]
#   #    name = "LoadBalancerName"
#   #    value = "p-example"


# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
#   ## Consul server address
#   # address = "localhost"
#
#   ## URI scheme for the Consul server, one of "http", "https"
#   # scheme = "http"
#
#   ## ACL token used in every request
#   # token = ""
#
#   ## HTTP Basic Authentication username and password.
#   # username = ""
#   # password = ""
#
#   ## Data center to query the health checks from
#   # datacenter = ""
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = true
#
#   ## Consul checks' tag splitting
#   # When tags are formatted like "key:value" with ":" as the delimiter,
#   # they will be split and reported as proper key:value pairs in Telegraf
#   # tag_delimiter = ":"


# # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]]
#   ## specify servers via a url matching:
#   ##  [protocol://][:password]@address[:port]
#   ##  e.g.
#   ##    http://couchbase-0.example.com/
#   ##    http://admin:secret@couchbase-0.example.com:8091/
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no protocol is specified, HTTP is used.
#   ## If no port is specified, 8091 is used.
#   servers = ["http://localhost:8091"]


# # Read CouchDB Stats from one or more servers
# [[inputs.couchdb]]
#   ## Works with CouchDB stats endpoints out of the box
#   ## Multiple Hosts from which to read CouchDB stats:
#   hosts = ["http://localhost:8086/_stats"]
#
#   ## Use HTTP Basic Authentication.
#   # basic_username = "telegraf"
#   # basic_password = "p@ssw0rd"


# # Input plugin for DC/OS metrics
# [[inputs.dcos]]
#   ## The DC/OS cluster URL.
#   cluster_url = "https://dcos-ee-master-1"
#
#   ## The ID of the service account.
#   service_account_id = "telegraf"
#   ## The private key file for the service account.
#   service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
#
#   ## Path containing login token.  If set, will read on every gather.
#   # token_file = "/home/dcos/.dcos/token"
#
#   ## In all filter options if both include and exclude are empty all items
#   ## will be collected.  Arrays may contain glob patterns.
#   ##
#   ## Node IDs to collect metrics from.  If a node is excluded, no metrics will
#   ## be collected for its containers or apps.
#   # node_include = []
#   # node_exclude = []
#   ## Container IDs to collect container metrics from.
#   # container_include = []
#   # container_exclude = []
#   ## Container IDs to collect app metrics from.
#   # app_include = []
#   # app_exclude = []
#
#   ## Maximum concurrent connections to the cluster.
#   # max_connections = 10
#   ## Maximum time to receive a response from cluster.
#   # response_timeout = "20s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## If false, skip chain & host verification
#   # insecure_skip_verify = true
#
#   ## Recommended filtering to reduce series cardinality.
#   # [inputs.dcos.tagdrop]
#   #   path = ["/var/lib/mesos/slave/slaves/*"]


# # Read metrics from one or many disque servers
# [[inputs.disque]]
#   ## An array of URI to gather stats about. Specify an ip or hostname
#   ## with optional port and password.
#   ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost"]


# # Provide a native collection for dmsetup based statistics for dm-cache
# [[inputs.dmcache]]
#   ## Whether to report per-device stats or not
#   per_device = true


# # Query given DNS server and gives statistics
# [[inputs.dns_query]]
#   ## servers to query
#   servers = ["8.8.8.8"]
#
#   ## Network is the network protocol name.
#   # network = "udp"
#
#   ## Domains or subdomains to query.
#   # domains = ["."]
#
#   ## Query record type.
#   ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
#   # record_type = "A"
#
#   ## DNS server port.
#   # port = 53
#
#   ## Query timeout in seconds.
#   # timeout = 2


# # Read metrics about docker containers
# [[inputs.docker]]
#   ## Docker Endpoint
#   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
#   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
#   endpoint = "unix:///var/run/docker.sock"
#
#   ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
#   gather_services = false
#
#   ## Only collect metrics for these containers, collect all if empty
#   container_names = []
#
#   ## Containers to include and exclude. Globs accepted.
#   ## Note that an empty array for both will include all containers
#   container_name_include = []
#   container_name_exclude = []
#
#   ## Container states to include and exclude. Globs accepted.
#   ## When empty only containers in the "running" state will be captured.
#   ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
#   ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
#   # container_state_include = []
#   # container_state_exclude = []
#
#   ## Timeout for docker list, info, and stats commands
#   timeout = "5s"
#
#   ## Whether to report for each container per-device blkio (8:0, 8:1...) and
#   ## network (eth0, eth1, ...) stats or not
#   perdevice = true
#   ## Whether to report for each container total blkio and network stats or not
#   total = false
#   ## Which environment variables should we use as a tag
#   # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
#   ## docker labels to include and exclude as tags.  Globs accepted.
#   ## Note that an empty array for both will include all labels as tags
#   docker_label_include = []
#   docker_label_exclude = []
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read statistics from one or many dovecot servers
# [[inputs.dovecot]]
#   ## specify dovecot servers via an address:port list
#   ##  e.g.
#   ##    localhost:24242
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost:24242"]
#   ## Type is one of "user", "domain", "ip", or "global"
#   type = "global"
#   ## Wildcard matches like "*.com". An empty string "" is same as "*"
#   ## If type = "ip" filters should be <IP/network>
#   filters = [""]


# # Read metrics about docker containers from Fargate/ECS v2 meta endpoints.
# [[inputs.ecs]]
#   ## ECS metadata url
#   # endpoint_url = "http://169.254.170.2"
#
#   ## Containers to include and exclude. Globs accepted.
#   ## Note that an empty array for both will include all containers
#   # container_name_include = []
#   # container_name_exclude = []
#
#   ## Container states to include and exclude. Globs accepted.
#   ## When empty only containers in the "RUNNING" state will be captured.
#   ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
#   ## "RESOURCES_PROVISIONED", "STOPPED".
#   # container_status_include = []
#   # container_status_exclude = []
#
#   ## ecs labels to include and exclude as tags.  Globs accepted.
#   ## Note that an empty array for both will include all labels as tags
#   ecs_label_include = [ "com.amazonaws.ecs.*" ]
#   ecs_label_exclude = []
#
#   ## Timeout for queries.
#   # timeout = "5s"


# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
#   ## specify a list of one or more Elasticsearch servers
#   # you can add username and password to your url to use basic authentication:
#   # servers = ["http://user:pass@localhost:9200"]
#   servers = ["http://localhost:9200"]
#
#   ## Timeout for HTTP requests to the elastic search server(s)
#   http_timeout = "5s"
#
#   ## When local is true (the default), the node will read only its own stats.
#   ## Set local to false when you want to read the node stats from all nodes
#   ## of the cluster.
#   local = true
#
#   ## Set cluster_health to true when you want to also obtain cluster health stats
#   cluster_health = false
#
#   ## Adjust cluster_health_level when you want to also obtain detailed health stats
#   ## The options are
#   ##  - indices (default)
#   ##  - cluster
#   # cluster_health_level = "indices"
#
#   ## Set cluster_stats to true when you want to also obtain cluster stats.
#   cluster_stats = false
#
#   ## Only gather cluster_stats from the master node. This requires local = true
#   cluster_stats_only_from_master = true
#
#   ## Indices to collect; can be one or more indices names or _all
#   indices_include = ["_all"]
#
#   ## One of "shards", "cluster", "indices"
#   indices_level = "shards"
#
#   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
#   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
#   ## "breaker". Per default, all stats are gathered.
#   # node_stats = ["jvm", "http"]
#
#   ## HTTP Basic Authentication username and password.
#   # username = ""
#   # password = ""
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
#   ## Commands array
#   commands = [
#     "/tmp/test.sh",
#     "/usr/bin/mycollector --foo=bar",
#     "/tmp/collect_*.sh"
#   ]
#
#   ## Timeout for each command to complete.
#   timeout = "5s"
#
#   ## measurement name suffix (for separating different commands)
#   name_suffix = "_mycollector"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from fail2ban.
# [[inputs.fail2ban]]
#   ## Use sudo to run fail2ban-client
#   use_sudo = false


# # Read devices value(s) from a Fibaro controller
# [[inputs.fibaro]]
#   ## Required Fibaro controller address/hostname.
#   ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
#   url = "http://<controller>:80"
#
#   ## Required credentials to access the API (http://<controller/api/<component>)
#   username = "<username>"
#   password = "<password>"
#
#   ## Amount of time allowed to complete the HTTP request
#   # timeout = "5s"


# # Reload and gather from file[s] on telegraf's interval.
# [[inputs.file]]
#   ## Files to parse each interval.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   /var/log/**.log     -> recursively find all .log files in /var/log
#   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
#   ##   /var/log/apache.log -> only read the apache log file
#   files = ["/var/log/apache/access.log"]
#
#   ## The dataformat to be read from files
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Count files in a directory
# [[inputs.filecount]]
#   ## Directory to gather stats about.
#   ##   deprecated in 1.9; use the directories option
#   # directory = "/var/cache/apt/archives"
#
#   ## Directories to gather stats about.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   /var/log/**    -> recursively find all directories in /var/log and count files in each directory
#   ##   /var/log/*/*   -> find all directories with a parent dir in /var/log and count files in each directory
#   ##   /var/log       -> count all files in /var/log and all of its subdirectories
#   directories = ["/var/cache/apt/archives"]
#
#   ## Only count files that match the name pattern. Defaults to "*".
#   name = "*.deb"
#
#   ## Count files in subdirectories. Defaults to true.
#   recursive = false
#
#   ## Only count regular files. Defaults to true.
#   regular_only = true
#
#   ## Only count files that are at least this size. If size is
#   ## a negative number, only count files that are smaller than the
#   ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
#   ## Without quotes and units, interpreted as size in bytes.
#   size = "0B"
#
#   ## Only count files that have not been touched for at least this
#   ## duration. If mtime is negative, only count files that have been
#   ## touched in this duration. Defaults to "0s".
#   mtime = "0s"
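#
#   ## Example (illustrative): size = "-1MiB" counts only files smaller than
#   ## 1 MiB, and mtime = "-5m" counts only files touched within the last
#   ## 5 minutes.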


# # Read stats about given file(s)
# [[inputs.filestat]]
#   ## Files to gather stats about.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
#   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
#   ##   "/var/log/apache.log" -> just tail the apache log file
#   ##
#   ## See https://github.com/gobwas/glob for more examples
#   ##
#   files = ["/var/log/**.log"]
#   ## If true, read the entire file and calculate an md5 checksum.
#   md5 = false


# # Read real time temps from fireboard.io servers
# [[inputs.fireboard]]
#   ## Specify auth token for your account
#   auth_token = "invalidAuthToken"
#   ## You can override the fireboard server URL if necessary
#   # url = "https://fireboard.io/api/v1/devices.json"
#   ## You can set a different http_timeout if you need to
#   ## You should set a string using a number and a time indicator,
#   ## for example "12s" for 12 seconds.
#   # http_timeout = "4s"


# # Read metrics exposed by fluentd in_monitor plugin
# [[inputs.fluentd]]
#   ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
#   ##
#   ## Endpoint:
#   ## - only one URI is allowed
#   ## - https is not supported
#   endpoint = "http://localhost:24220/api/plugins.json"
#
#   ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
#   exclude = [
# 	  "monitor_agent",
# 	  "dummy",
#   ]


# # Gather repository information from GitHub hosted repositories.
# [[inputs.github]]
#   ## List of repositories to monitor.
#   repositories = [
# 	  "influxdata/telegraf",
# 	  "influxdata/influxdb"
#   ]
#
#   ## Github API access token.  Unauthenticated requests are limited to 60 per hour.
#   # access_token = ""
#
#   ## Github API enterprise url. Github Enterprise accounts must specify their base url.
#   # enterprise_base_url = ""
#
#   ## Timeout for HTTP requests.
#   # http_timeout = "5s"


# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
#   ## API endpoint, currently supported API:
#   ##
#   ##   - multiple  (Ex http://<host>:12900/system/metrics/multiple)
#   ##   - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
#   ##
#   ## For namespace endpoint, the metrics array will be ignored for that call.
#   ## Endpoint can contain namespace and multiple type calls.
#   ##
#   ## Please check http://[graylog-server-ip]:12900/api-browser for full list
#   ## of endpoints
#   servers = [
#     "http://[graylog-server-ip]:12900/system/metrics/multiple",
#   ]
#
#   ## Metrics list
#   ## List of metrics can be found on Graylog webservice documentation.
#   ## Or by hitting the web service api at:
#   ##   http://[graylog-host]:12900/system/metrics
#   metrics = [
#     "jvm.cl.loaded",
#     "jvm.memory.pools.Metaspace.committed"
#   ]
#
#   ## Username and password
#   username = ""
#   password = ""
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics of haproxy, via socket or csv stats page
# [[inputs.haproxy]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
#   ## Make sure you specify the complete path to the stats endpoint
#   ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
#
#   ## If no servers are specified, then the default is 127.0.0.1:1936/haproxy?stats
#   servers = ["http://myhaproxy.com:1936/haproxy?stats"]
#
#   ## Credentials for basic HTTP authentication
#   # username = "admin"
#   # password = "admin"
#
#   ## You can also use local socket with standard wildcard globbing.
#   ## Server address not starting with 'http' will be treated as a possible
#   ## socket, so both examples below are valid.
#   # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
#
#   ## By default, some of the fields are renamed from what haproxy calls them.
#   ## Setting this option to true results in the plugin keeping the original
#   ## field names.
#   # keep_field_names = false
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Monitor disks' temperatures using hddtemp
# [[inputs.hddtemp]]
#   ## By default, telegraf gathers temperature data from all disks detected
#   ## by hddtemp.
#   ##
#   ## Only collect temps from the selected disks.
#   ##
#   ## A * as the device name will return the temperature values of all disks.
#   ##
#   # address = "127.0.0.1:7634"
#   # devices = ["sda", "*"]


# # Read formatted metrics from one or more HTTP endpoints
# [[inputs.http]]
#   ## One or more URLs from which to read formatted metrics
#   urls = [
#     "http://localhost/metrics"
#   ]
#
#   ## HTTP method
#   # method = "GET"
#
#   ## Optional HTTP headers
#   # headers = {"X-Special-Header" = "Special-Value"}
#
#   ## Optional HTTP Basic Auth Credentials
#   # username = "username"
#   # password = "pa$$word"
#
#   ## HTTP entity-body to send with POST/PUT requests.
#   # body = ""
#
#   ## HTTP Content-Encoding for write request body, can be set to "gzip" to
#   ## compress body or "identity" to apply no encoding.
#   # content_encoding = "identity"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Amount of time allowed to complete the HTTP request
#   # timeout = "5s"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"
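#
#   ## Example (illustrative, URL and measurement name assumed): polling a JSON
#   ## endpoint instead of line protocol
#   # urls = ["http://localhost:8080/stats.json"]
#   # data_format = "json"
#   # name_override = "webapp_stats"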


# # HTTP/HTTPS request given an address a method and a timeout
# [[inputs.http_response]]
#   ## Deprecated in 1.12, use 'urls'
#   ## Server address (default http://localhost)
#   # address = "http://localhost"
#
#   ## List of urls to query.
#   # urls = ["http://localhost"]
#
#   ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
#   # http_proxy = "http://localhost:8888"
#
#   ## Set response_timeout (default 5 seconds)
#   # response_timeout = "5s"
#
#   ## HTTP Request Method
#   # method = "GET"
#
#   ## Whether to follow redirects from the server (defaults to false)
#   # follow_redirects = false
#
#   ## Optional HTTP Request Body
#   # body = '''
#   # {'fake':'data'}
#   # '''
#
#   ## Optional substring or regex match in body of the response
#   # response_string_match = "\"service_status\": \"up\""
#   # response_string_match = "ok"
#   # response_string_match = "\".*_status\".?:.?\"up\""
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## HTTP Request Headers (all values must be strings)
#   # [inputs.http_response.headers]
#   #   Host = "github.com"
#
#   ## Interface to use when dialing an address
#   # interface = "eth0"
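#
#   ## Example (illustrative, URL and body assumed): checking an HTTPS health
#   ## endpoint for a status string
#   # urls = ["https://example.org/health"]
#   # response_string_match = "\"status\": \"ok\""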


# # Read flattened metrics from one or more JSON HTTP endpoints
# [[inputs.httpjson]]
#   ## NOTE This plugin only reads numerical measurements, strings and booleans
#   ## will be ignored.
#
#   ## Name for the service being polled.  Will be appended to the name of the
#   ## measurement e.g. httpjson_webserver_stats
#   ##
#   ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
#   name = "webserver_stats"
#
#   ## URL of each server in the service's cluster
#   servers = [
#     "http://localhost:9999/stats/",
#     "http://localhost:9998/stats/",
#   ]
#   ## Set response_timeout (default 5 seconds)
#   response_timeout = "5s"
#
#   ## HTTP method to use: GET or POST (case-sensitive)
#   method = "GET"
#
#   ## List of tag names to extract from top-level of JSON server response
#   # tag_keys = [
#   #   "my_tag_1",
#   #   "my_tag_2"
#   # ]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## HTTP parameters (all values must be strings).  For "GET" requests, data
#   ## will be included in the query.  For "POST" requests, data will be included
#   ## in the request body as "x-www-form-urlencoded".
#   # [inputs.httpjson.parameters]
#   #   event_type = "cpu_spike"
#   #   threshold = "0.75"
#
#   ## HTTP Headers (all values must be strings)
#   # [inputs.httpjson.headers]
#   #   X-Auth-Token = "my-xauth-token"
#   #   apiVersion = "v1"


# # Gather Icinga2 status
# [[inputs.icinga2]]
#   ## Required Icinga2 server address (default: "https://localhost:5665")
#   # server = "https://localhost:5665"
#
#   ## Required Icinga2 object type ("services" or "hosts", default "services")
#   # object_type = "services"
#
#   ## Credentials for basic HTTP authentication
#   # username = "admin"
#   # password = "admin"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = true


# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.influxdb]]
#   ## Works with InfluxDB debug endpoints out of the box,
#   ## but other services can use this format too.
#   ## See the influxdb plugin's README for more details.
#
#   ## Multiple URLs from which to read InfluxDB-formatted JSON
#   ## Default is "http://localhost:8086/debug/vars".
#   urls = [
#     "http://localhost:8086/debug/vars"
#   ]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## http request & header timeout
#   timeout = "5s"


# # Collect statistics about itself
# [[inputs.internal]]
#   ## If true, collect telegraf memory stats.
#   # collect_memstats = true


# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
# [[inputs.interrupts]]
#   ## When set to true, cpu metrics are tagged with the cpu.  Otherwise cpu is
#   ## stored as a field.
#   ##
#   ## The default is false for backwards compatibility, and will be changed to
#   ## true in a future version.  It is recommended to set to true on new
#   ## deployments.
#   # cpu_as_tag = false
#
#   ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
#   # [inputs.interrupts.tagdrop]
#   #   irq = [ "NET_RX", "TASKLET" ]


# # Read metrics from the bare metal servers via IPMI
# [[inputs.ipmi_sensor]]
#   ## optionally specify the path to the ipmitool executable
#   # path = "/usr/bin/ipmitool"
#   ##
#   ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
#   # privilege = "ADMINISTRATOR"
#   ##
#   ## optionally specify one or more servers via a url matching
#   ##  [username[:password]@][protocol[(address)]]
#   ##  e.g.
#   ##    root:passwd@lan(127.0.0.1)
#   ##
#   ## if no servers are specified, local machine sensor stats will be queried
#   ##
#   # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
#   ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
#   ## gaps or overlap in pulled data
#   interval = "30s"
#
#   ## Timeout for the ipmitool command to complete
#   timeout = "20s"
#
#   ## Schema Version: (Optional, defaults to version 1)
#   metric_version = 2


# # Gather packets and bytes counters from Linux ipsets
# [[inputs.ipset]]
#   ## By default, we only show sets which have already matched at least 1 packet.
#   ## Set include_unmatched_sets = true to gather them all.
#   include_unmatched_sets = false
#   ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
#   use_sudo = false
#   ## The default timeout of 1s for ipset execution can be overridden here:
#   # timeout = "1s"


# # Read jobs and cluster metrics from Jenkins instances
# [[inputs.jenkins]]
#   ## The Jenkins URL
#   url = "http://my-jenkins-instance:8080"
#   # username = "admin"
#   # password = "admin"
#
#   ## Set response_timeout
#   response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional Max Job Build Age filter
#   ## Default 1 hour, ignore builds older than max_build_age
#   # max_build_age = "1h"
#
#   ## Optional Sub Job Depth filter
#   ## Jenkins can have an unlimited number of sub job layers
#   ## This config will limit the layers of pulling; the default value 0 means
#   ## unlimited pulling until there are no more sub jobs
#   # max_subjob_depth = 0
#
#   ## Optional Sub Job Per Layer
#   ## In workflow-multibranch-plugin, each branch will be created as a sub job.
#   ## This config will limit the calls to only the latest branches in each layer;
#   ## if empty, the default value 10 is used
#   # max_subjob_per_layer = 10
#
#   ## Jobs to exclude from gathering
#   # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
#
#   ## Nodes to exclude from gathering
#   # node_exclude = [ "node1", "node2" ]
#
#   ## Worker pool for jenkins plugin only
#   ## If this field is empty, the default value 5 is used
#   # max_connections = 5


# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
#   # DEPRECATED: the jolokia plugin has been deprecated in favor of the
#   # jolokia2 plugin
#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
#
#   ## This is the context root used to compose the jolokia url
#   ## NOTE that Jolokia requires a trailing slash at the end of the context root
#   ## NOTE that your jolokia security policy must allow for POST requests.
#   context = "/jolokia/"
#
#   ## This specifies the mode used
#   # mode = "proxy"
#   #
#   ## When in proxy mode this section is used to specify further
#   ## proxy address configurations.
#   ## Remember to change host address to fit your environment.
#   # [inputs.jolokia.proxy]
#   #   host = "127.0.0.1"
#   #   port = "8080"
#
#   ## Optional http timeouts
#   ##
#   ## response_header_timeout, if non-zero, specifies the amount of time to wait
#   ## for a server's response headers after fully writing the request.
#   # response_header_timeout = "3s"
#   ##
#   ## client_timeout specifies a time limit for requests made by this client.
#   ## Includes connection time, any redirects, and reading the response body.
#   # client_timeout = "4s"
#
#   ## Attribute delimiter
#   ##
#   ## When multiple attributes are returned for a single
#   ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
#   ## name, and the attribute name, separated by the given delimiter.
#   # delimiter = "_"
#
#   ## List of servers exposing jolokia read service
#   [[inputs.jolokia.servers]]
#     name = "as-server-01"
#     host = "127.0.0.1"
#     port = "8080"
#     # username = "myuser"
#     # password = "mypassword"
#
#   ## List of metrics collected on above servers
#   ## Each metric consists of a name, a jmx path and either
#   ## a pass or drop slice attribute.
#   ## This collects all heap memory usage metrics.
#   [[inputs.jolokia.metrics]]
#     name = "heap_memory_usage"
#     mbean  = "java.lang:type=Memory"
#     attribute = "HeapMemoryUsage"
#
#   ## This collects thread count metrics.
#   [[inputs.jolokia.metrics]]
#     name = "thread_count"
#     mbean  = "java.lang:type=Threading"
#     attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
#
#   ## This collects class loaded/unloaded count metrics.
#   [[inputs.jolokia.metrics]]
#     name = "class_count"
#     mbean  = "java.lang:type=ClassLoading"
#     attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"


# # Read JMX metrics from a Jolokia REST agent endpoint
# [[inputs.jolokia2_agent]]
#   # default_tag_prefix      = ""
#   # default_field_prefix    = ""
#   # default_field_separator = "."
#
#   # Add agents URLs to query
#   urls = ["http://localhost:8080/jolokia"]
#   # username = ""
#   # password = ""
#   # response_timeout = "5s"
#
#   ## Optional TLS config
#   # tls_ca   = "/var/private/ca.pem"
#   # tls_cert = "/var/private/client.pem"
#   # tls_key  = "/var/private/client-key.pem"
#   # insecure_skip_verify = false
#
#   ## Add metrics to read
#   [[inputs.jolokia2_agent.metric]]
#     name  = "java_runtime"
#     mbean = "java.lang:type=Runtime"
#     paths = ["Uptime"]


# # Read JMX metrics from a Jolokia REST proxy endpoint
# [[inputs.jolokia2_proxy]]
#   # default_tag_prefix      = ""
#   # default_field_prefix    = ""
#   # default_field_separator = "."
#
#   ## Proxy agent
#   url = "http://localhost:8080/jolokia"
#   # username = ""
#   # password = ""
#   # response_timeout = "5s"
#
#   ## Optional TLS config
#   # tls_ca   = "/var/private/ca.pem"
#   # tls_cert = "/var/private/client.pem"
#   # tls_key  = "/var/private/client-key.pem"
#   # insecure_skip_verify = false
#
#   ## Add proxy targets to query
#   # default_target_username = ""
#   # default_target_password = ""
#   [[inputs.jolokia2_proxy.target]]
#     url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
#     # username = ""
#     # password = ""
#
#   ## Add metrics to read
#   [[inputs.jolokia2_proxy.metric]]
#     name  = "java_runtime"
#     mbean = "java.lang:type=Runtime"
#     paths = ["Uptime"]


# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.kapacitor]]
#   ## Multiple URLs from which to read Kapacitor-formatted JSON
#   ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
#   urls = [
#     "http://localhost:9092/kapacitor/v1/debug/vars"
#   ]
#
#   ## Time limit for http requests
#   timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read status information from one or more Kibana servers
# [[inputs.kibana]]
#   ## specify a list of one or more Kibana servers
#   servers = ["http://localhost:5601"]
#
#   ## Timeout for HTTP requests
#   timeout = "5s"
#
#   ## HTTP Basic Auth credentials
#   # username = "username"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from the Kubernetes api
# [[inputs.kube_inventory]]
#   ## URL for the Kubernetes API
#   url = "https://127.0.0.1"
#
#   ## Namespace to use. Set to "" to use all namespaces.
#   # namespace = "default"
#
#   ## Use bearer token for authorization. ('bearer_token' takes priority)
#   # bearer_token = "/path/to/bearer/token"
#   ## OR
#   # bearer_token_string = "abc_123"
#
#   ## Set response_timeout (default 5 seconds)
#   # response_timeout = "5s"
#
#   ## Optional Resources to exclude from gathering
#   ## Leave blank to try to gather everything available.
#   ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
#   ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
#   # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
#
#   ## Optional Resources to include when gathering
#   ## Overrides resource_exclude if both set.
#   # resource_include = [ "deployments", "nodes", "statefulsets" ]
#
#   ## Optional TLS Config
#   # tls_ca = "/path/to/cafile"
#   # tls_cert = "/path/to/certfile"
#   # tls_key = "/path/to/keyfile"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from the kubernetes kubelet api
# [[inputs.kubernetes]]
#   ## URL for the kubelet
#   url = "http://127.0.0.1:10255"
#
#   ## Use bearer token for authorization. ('bearer_token' takes priority)
#   # bearer_token = "/path/to/bearer/token"
#   ## OR
#   # bearer_token_string = "abc_123"
#
#   ## Set response_timeout (default 5 seconds)
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/path/to/cafile"
#   # tls_cert = "/path/to/certfile"
#   # tls_key = "/path/to/keyfile"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
#   ## An array of URLs of the form:
#   ##   host [ ":" port]
#   servers = ["127.0.0.1:4020"]


# # Provides Linux sysctl fs metrics
# [[inputs.linux_sysctl_fs]]
#   # no configuration


# # Read metrics exposed by Logstash
# [[inputs.logstash]]
#   ## The URL of the exposed Logstash API endpoint.
#   url = "http://127.0.0.1:9600"
#
#   ## Use Logstash 5 single pipeline API, set to true when monitoring
#   ## Logstash 5.
#   # single_pipeline = false
#
#   ## Enable optional collection components.  Can contain
#   ## "pipelines", "process", and "jvm".
#   # collect = ["pipelines", "process", "jvm"]
#
#   ## Timeout for HTTP requests.
#   # timeout = "5s"
#
#   ## Optional HTTP Basic Auth credentials.
#   # username = "username"
#   # password = "pa$$word"
#
#   ## Optional TLS Config.
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#
#   ## Use TLS but skip chain & host verification.
#   # insecure_skip_verify = false
#
#   ## Optional HTTP headers.
#   # [inputs.logstash.headers]
#   #   "X-Special-Header" = "Special-Value"


# # Read metrics from local Lustre service on OST, MDS
# [[inputs.lustre2]]
#   ## An array of /proc globs to search for Lustre stats
#   ## If not specified, the default will work on Lustre 2.5.x
#   ##
#   # ost_procfiles = [
#   #   "/proc/fs/lustre/obdfilter/*/stats",
#   #   "/proc/fs/lustre/osd-ldiskfs/*/stats",
#   #   "/proc/fs/lustre/obdfilter/*/job_stats",
#   # ]
#   # mds_procfiles = [
#   #   "/proc/fs/lustre/mdt/*/md_stats",
#   #   "/proc/fs/lustre/mdt/*/job_stats",
#   # ]


# # Gathers metrics from the /3.0/reports MailChimp API
# [[inputs.mailchimp]]
#   ## MailChimp API key
#   ## get from https://admin.mailchimp.com/account/api/
#   api_key = "" # required
#   ## Reports for campaigns sent more than days_old ago will not be collected.
#   ## 0 means collect all.
#   days_old = 0
#   ## Campaign ID to get; if empty, gets all campaigns. This option overrides days_old.
#   # campaign_id = ""


# # Retrieves information on a specific host in a MarkLogic Cluster
# [[inputs.marklogic]]
#   ## Base URL of the MarkLogic HTTP Server.
#   url = "http://localhost:8002"
#
#   ## List of specific hostnames to retrieve information from. At least one (1) is required.
#   # hosts = ["hostname1", "hostname2"]
#
#   ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
#   # username = "myuser"
#   # password = "mypassword"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from one or many mcrouter servers
# [[inputs.mcrouter]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
# 	servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
#
# 	## Timeout for metric collections from all servers.  Minimum timeout is "1s".
#   # timeout = "5s"


# # Read metrics from one or many memcached servers
# [[inputs.memcached]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with optional port. ie localhost, 10.0.0.1:11211, etc.
#   servers = ["localhost:11211"]
#   # unix_sockets = ["/var/run/memcached.sock"]


# # Telegraf plugin for gathering metrics from N Mesos masters
# [[inputs.mesos]]
#   ## Timeout, in ms.
#   timeout = 100
#   ## A list of Mesos masters.
#   masters = ["http://localhost:5050"]
#   ## Master metrics groups to be collected, by default, all enabled.
#   master_collections = [
#     "resources",
#     "master",
#     "system",
#     "agents",
#     "frameworks",
#     "framework_offers",
#     "tasks",
#     "messages",
#     "evqueue",
#     "registrar",
#     "allocator",
#   ]
#   ## A list of Mesos slaves, default is []
#   # slaves = []
#   ## Slave metrics groups to be collected, by default, all enabled.
#   # slave_collections = [
#   #   "resources",
#   #   "agent",
#   #   "system",
#   #   "executors",
#   #   "tasks",
#   #   "messages",
#   # ]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Collects scores from a Minecraft server's scoreboard using the RCON protocol
# [[inputs.minecraft]]
#   ## Address of the Minecraft server.
#   # server = "localhost"
#
#   ## Server RCON Port.
#   # port = "25575"
#
#   ## Server RCON Password.
#   password = ""
#
#   ## Uncomment to remove deprecated metric components.
#   # tagdrop = ["server"]


# # Read metrics from one or many MongoDB servers
# [[inputs.mongodb]]
#   ## An array of URLs of the form:
#   ##   "mongodb://" [user ":" pass "@"] host [ ":" port]
#   ## For example:
#   ##   mongodb://user:auth_key@10.10.3.30:27017,
#   ##   mongodb://10.10.3.33:18832,
#   servers = ["mongodb://127.0.0.1:27017"]
#
#   ## When true, collect per database stats
#   # gather_perdb_stats = false
#
#   ## When true, collect per collection stats
#   # gather_col_stats = false
#
#   ## List of databases for which collection stats are collected
#   ## If empty, all databases are included
#   # col_stats_dbs = ["local"]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Aggregates the contents of multiple files into a single point
# [[inputs.multifile]]
#   ## Base directory where telegraf will look for files.
#   ## Omit this option to use absolute paths.
#   base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
#
#   ## If true, Telegraf discards all data when a single file can't be read.
#   ## Otherwise, Telegraf omits the field generated from this file.
#   # fail_early = true
#
#   ## Files to parse each interval.
#   [[inputs.multifile.file]]
#     file = "in_pressure_input"
#     dest = "pressure"
#     conversion = "float"
#   [[inputs.multifile.file]]
#     file = "in_temp_input"
#     dest = "temperature"
#     conversion = "float(3)"
#   [[inputs.multifile.file]]
#     file = "in_humidityrelative_input"
#     dest = "humidityrelative"
#     conversion = "float(3)"


# # Read metrics from one or many mysql servers
# [[inputs.mysql]]
#   ## specify servers via a url matching:
#   ##  [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
#   ##  see https://github.com/go-sql-driver/mysql#dsn-data-source-name
#   ##  e.g.
#   ##    servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
#   ##    servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
#   #
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["tcp(127.0.0.1:3306)/"]
#
#   ## Selects the metric output format.
#   ##
#   ## This option exists to maintain backwards compatibility, if you have
#   ## existing metrics do not set or change this value until you are ready to
#   ## migrate to the new format.
#   ##
#   ## If you do not have existing metrics from this plugin, set to the latest
#   ## version.
#   ##
#   ## Telegraf >=1.6: metric_version = 2
#   ##           <1.6: metric_version = 1 (or unset)
#   metric_version = 2
#
#   ## the limits for metrics from perf_events_statements
#   perf_events_statements_digest_text_limit  = 120
#   perf_events_statements_limit              = 250
#   perf_events_statements_time_limit         = 86400
#   #
#   ## if the list is empty, then metrics are gathered from all database tables
#   table_schema_databases                    = []
#   #
#   ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided in the above list
#   gather_table_schema                       = false
#   #
#   ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
#   gather_process_list                       = true
#   #
#   ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
#   gather_user_statistics                    = true
#   #
#   ## gather auto_increment columns and max values from information schema
#   gather_info_schema_auto_inc               = true
#   #
#   ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
#   gather_innodb_metrics                     = true
#   #
#   ## gather metrics from SHOW SLAVE STATUS command output
#   gather_slave_status                       = true
#   #
#   ## gather metrics from SHOW BINARY LOGS command output
#   gather_binary_logs                        = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
#   gather_table_io_waits                     = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
#   gather_table_lock_waits                   = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
#   gather_index_io_waits                     = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
#   gather_event_waits                        = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
#   gather_file_events_stats                  = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
#   gather_perf_events_statements             = false
#   #
#   ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
#   interval_slow                   = "30m"
#
#   ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
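#
#   ## Example (illustrative) DSN using the TLS settings above via tls=custom:
#   # servers = ["user:passwd@tcp(dbhost:3306)/?tls=custom"]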


# # Provides metrics about the state of a NATS server
# [[inputs.nats]]
#   ## The address of the monitoring endpoint of the NATS server
#   server = "http://localhost:8222"
#
#   ## Maximum time to receive response
#   # response_timeout = "5s"


# # Neptune Apex data collector
# [[inputs.neptune_apex]]
#   ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
#   ## Measurements will be logged under "apex".
#
#   ## The base URL of the local Apex(es). If you specify more than one server, they will
#   ## be differentiated by the "source" tag.
#   servers = [
#     "http://apex.local",
#   ]
#
#   ## The response_timeout specifies how long to wait for a reply from the Apex.
#   #response_timeout = "5s"


# # Read metrics about network interface usage
# [[inputs.net]]
#   ## By default, telegraf gathers stats from any up interface (excluding loopback)
#   ## Setting interfaces will tell it to gather these explicit interfaces,
#   ## regardless of status.
#   ##
#   # interfaces = ["eth0"]
#   ##
#   ## On linux systems telegraf also collects protocol stats.
#   ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
#   ##
#   # ignore_protocol_stats = false
#   ##


# # Collect response time of a TCP or UDP connection
# [[inputs.net_response]]
#   ## Protocol, must be "tcp" or "udp"
#   ## NOTE: because the "udp" protocol does not respond to requests, it requires
#   ## a send/expect string pair (see below).
#   protocol = "tcp"
#   ## Server address (default localhost)
#   address = "localhost:80"
#
#   ## Set timeout
#   # timeout = "1s"
#
#   ## Set read timeout (only used if expecting a response)
#   # read_timeout = "1s"
#
#   ## The following options are required for UDP checks. For TCP, they are
#   ## optional. The plugin will send the given string to the server and then
#   ## expect to receive the given 'expect' string back.
#   ## string sent to the server
#   # send = "ssh"
#   ## expected string in answer
#   # expect = "ssh"
#
#   ## Uncomment to remove deprecated fields
#   # fielddrop = ["result_type", "string_found"]
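#
#   ## Example (illustrative) UDP check; the server at this address is assumed
#   ## to echo the sent string back:
#   # protocol = "udp"
#   # address = "localhost:7"
#   # send = "ping"
#   # expect = "ping"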


# # Read TCP metrics such as established, time wait and sockets counts.
# [[inputs.netstat]]
#   # no configuration


# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
#   # An array of Nginx stub_status URI to gather stats.
#   urls = ["http://localhost/server_status"]
#
#   ## Optional TLS Config
#   tls_ca = "/etc/telegraf/ca.pem"
#   tls_cert = "/etc/telegraf/cert.cer"
#   tls_key = "/etc/telegraf/key.key"
#   ## Use TLS but skip chain & host verification
#   insecure_skip_verify = false
#
#   # HTTP response timeout (default: 5s)
#   response_timeout = "5s"


# # Read Nginx Plus' full status information (ngx_http_status_module)
# [[inputs.nginx_plus]]
#   ## An array of ngx_http_status_module or status URI to gather stats.
#   urls = ["http://localhost/status"]
#
#   # HTTP response timeout (default: 5s)
#   response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read Nginx Plus API status information
# [[inputs.nginx_plus_api]]
#   ## An array of API URI to gather stats.
#   urls = ["http://localhost/api"]
#
#   # Nginx API version, default: 3
#   # api_version = 3
#
#   # HTTP response timeout (default: 5s)
#   response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
# [[inputs.nginx_upstream_check]]
#   ## A URL where the Nginx upstream check module is enabled
#   ## It should be set to return a JSON formatted response
#   url = "http://127.0.0.1/status?format=json"
#
#   ## HTTP method
#   # method = "GET"
#
#   ## Optional HTTP headers
#   # headers = {"X-Special-Header" = "Special-Value"}
#
#   ## Override HTTP "Host" header
#   # host_header = "check.example.com"
#
#   ## Timeout for HTTP requests
#   timeout = "5s"
#
#   ## Optional HTTP Basic Auth credentials
#   # username = "username"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read Nginx virtual host traffic status module information (nginx-module-vts)
# [[inputs.nginx_vts]]
#   ## An array of ngx_http_status_module or status URI to gather stats.
#   urls = ["http://localhost/status"]
#
#   ## HTTP response timeout (default: 5s)
#   response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read NSQ topic and channel statistics.
# [[inputs.nsq]]
#   ## An array of NSQD HTTP API endpoints
#   endpoints  = ["http://localhost:4151"]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Collect kernel snmp counters and network interface statistics
# [[inputs.nstat]]
#   ## file paths for proc files. If empty, default paths will be used:
#   ##    /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
#   ## These can also be overridden with env variables, see README.
#   proc_net_netstat = "/proc/net/netstat"
#   proc_net_snmp = "/proc/net/snmp"
#   proc_net_snmp6 = "/proc/net/snmp6"
#   ## dump metrics with 0 values too
#   dump_zeros       = true


# # Get standard NTP query metrics, requires ntpq executable.
# [[inputs.ntpq]]
#   ## If false, the -n flag is passed to ntpq (skip DNS lookups). Can reduce metric gather time.
#   dns_lookup = true


# # Pulls statistics from nvidia GPUs attached to the host
# [[inputs.nvidia_smi]]
#   ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
#   # bin_path = "/usr/bin/nvidia-smi"
#
#   ## Optional: timeout for GPU polling
#   # timeout = "5s"


# # OpenLDAP cn=Monitor plugin
# [[inputs.openldap]]
#   host = "localhost"
#   port = 389
#
#   # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
#   # note that port will likely need to be changed to 636 for ldaps
#   # valid options: "" | "starttls" | "ldaps"
#   tls = ""
#
#   # skip peer certificate verification. Default is false.
#   insecure_skip_verify = false
#
#   # Path to PEM-encoded Root certificate to use to verify server certificate
#   tls_ca = "/etc/ssl/certs.pem"
#
#   # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
#   bind_dn = ""
#   bind_password = ""
#
#   # Reverse metric names so they sort more naturally. Recommended.
#   # This defaults to false if unset, but is set to true when generating a new config
#   reverse_metric_names = true


# # Get standard NTP query metrics from OpenNTPD.
# [[inputs.openntpd]]
#   ## Run ntpctl binary with sudo.
#   # use_sudo = false
#
#   ## Location of the ntpctl binary.
#   # binary = "/usr/sbin/ntpctl"
#
#   ## Maximum time the ntpctl binary is allowed to run.
#   # timeout = "5ms"


# # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
# [[inputs.opensmtpd]]
#   ## If running as a restricted user you can prepend sudo for additional access:
#   #use_sudo = false
#
#   ## The default location of the smtpctl binary can be overridden with:
#   binary = "/usr/sbin/smtpctl"
#
#   ## The default timeout of 1000ms can be overridden with (in milliseconds):
#   timeout = 1000


# # Read current weather and forecasts data from openweathermap.org
# [[inputs.openweathermap]]
#   ## OpenWeatherMap API key.
#   app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
#
#   ## City IDs to collect weather data from.
#   city_id = ["5391959"]
#
#   ## APIs to fetch; can contain "weather" or "forecast".
#   fetch = ["weather", "forecast"]
#
#   ## OpenWeatherMap base URL
#   # base_url = "https://api.openweathermap.org/"
#
#   ## Timeout for HTTP response.
#   # response_timeout = "5s"
#
#   ## Preferred unit system for temperature and wind speed. Can be one of
#   ## "metric", "imperial", or "standard".
#   # units = "metric"
#
#   ## Query interval; OpenWeatherMap updates their weather data every 10
#   ## minutes.
#   interval = "10m"


# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
#   ## Path of passenger-status.
#   ##
#   ## The plugin gathers metrics by parsing the XML output of passenger-status
#   ## More information about the tool:
#   ##   https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
#   ##
#   ## If no path is specified, then the plugin simply executes passenger-status,
#   ## hoping it can be found in your PATH
#   command = "passenger-status -v --show=xml"


# # Gather counters from PF
# [[inputs.pf]]
#   ## PF requires root access on most systems.
#   ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
#   ## Users must configure sudo to allow the telegraf user to run pfctl with no password.
#   ## pfctl can be restricted to only the list command "pfctl -s info".
#   use_sudo = false


# # Read metrics of phpfpm, via HTTP status page or socket
# [[inputs.phpfpm]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with optional port and path
#   ##
#   ## Plugin can be configured in three modes (any one can be used):
#   ##   - http: the URL must start with http:// or https://, ie:
#   ##       "http://localhost/status"
#   ##       "http://192.168.130.1/status?full"
#   ##
#   ##   - unixsocket: path to fpm socket, ie:
#   ##       "/var/run/php5-fpm.sock"
#   ##      or using a custom fpm status path:
#   ##       "/var/run/php5-fpm.sock:fpm-custom-status-path"
#   ##
#   ##   - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
#   ##       "fcgi://10.0.0.12:9000/status"
#   ##       "cgi://10.0.10.12:9001/status"
#   ##
#   ## Example of multiple gathering from local socket and remote host
#   ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
#   urls = ["http://localhost/status"]
#
#   ## Duration allowed to complete HTTP requests.
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Ping given url(s) and return statistics
# [[inputs.ping]]
#   ## List of urls to ping
#   urls = ["example.org"]
#
#   ## Number of pings to send per collection (ping -c <COUNT>)
#   # count = 1
#
#   ## Interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
#   # ping_interval = 1.0
#
#   ## Per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
#   # timeout = 1.0
#
#   ## Total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
#   # deadline = 10
#
#   ## Interface or source address to send ping from (ping -I[-S] <INTERFACE/SRC_ADDR>)
#   # interface = ""
#
#   ## How to ping. "native" doesn't have external dependencies, while "exec" depends on 'ping'.
#   # method = "exec"
#
#   ## Specify the ping executable binary, default is "ping"
# 	# binary = "ping"
#
#   ## Arguments for ping command. When arguments is not empty, system binary will be used and
#   ## other options (ping_interval, timeout, etc) will be ignored.
#   # arguments = ["-c", "3"]
#
#   ## Use only ipv6 addresses when resolving hostnames.
#   # ipv6 = false


# # Measure postfix queue statistics
# [[inputs.postfix]]
#   ## Postfix queue directory. If not provided, telegraf will try to use
#   ## 'postconf -h queue_directory' to determine it.
#   # queue_directory = "/var/spool/postfix"


# # Read metrics from one or many PowerDNS servers
# [[inputs.powerdns]]
#   ## An array of sockets to gather stats about.
#   ## Specify a path to unix socket.
#   unix_sockets = ["/var/run/pdns.controlsocket"]


# # Read metrics from one or many PowerDNS Recursor servers
# [[inputs.powerdns_recursor]]
#   ## Path to the Recursor control socket.
#   unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
#
#   ## Directory to create receive socket.  This default is likely not writable,
#   ## please reference the full plugin documentation for a recommended setup.
#   # socket_dir = "/var/run/"
#   ## Socket permissions for the receive socket.
#   # socket_mode = "0666"


# # Monitor process cpu and memory usage
# [[inputs.procstat]]
#   ## PID file to monitor process
#   pid_file = "/var/run/nginx.pid"
#   ## executable name (ie, pgrep <exe>)
#   # exe = "nginx"
#   ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
#   # pattern = "nginx"
#   ## user as argument for pgrep (ie, pgrep -u <user>)
#   # user = "nginx"
#   ## Systemd unit name
#   # systemd_unit = "nginx.service"
#   ## CGroup name or path
#   # cgroup = "systemd/system.slice/nginx.service"
#
#   ## Windows service name
#   # win_service = ""
#
#   ## override for process_name
#   ## This is optional; default is sourced from /proc/<pid>/status
#   # process_name = "bar"
#
#   ## Field name prefix
#   # prefix = ""
#
#   ## When true add the full cmdline as a tag.
#   # cmdline_tag = false
#
#   ## Add PID as a tag instead of a field; useful to differentiate between
#   ## processes whose tags are otherwise the same.  Can create a large number
#   ## of series, use judiciously.
#   # pid_tag = false
#
#   ## Method to use when finding process IDs.  Can be one of 'pgrep', or
#   ## 'native'.  The pgrep finder calls the pgrep executable in the PATH while
#   ## the native finder performs the search directly in a manner dependent on the
#   ## platform.  Default is 'pgrep'
#   # pid_finder = "pgrep"


# # Reads last_run_summary.yaml file and converts to measurements
# [[inputs.puppetagent]]
#   ## Location of puppet last run summary file
#   location = "/var/lib/puppet/state/last_run_summary.yaml"


# # Reads metrics from RabbitMQ servers via the Management Plugin
# [[inputs.rabbitmq]]
#   ## Management Plugin url. (default: http://localhost:15672)
#   # url = "http://localhost:15672"
#   ## Tag added to rabbitmq_overview series; deprecated: use tags
#   # name = "rmq-server-1"
#   ## Credentials
#   # username = "guest"
#   # password = "guest"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional request timeouts
#   ##
#   ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
#   ## for a server's response headers after fully writing the request.
#   # header_timeout = "3s"
#   ##
#   ## client_timeout specifies a time limit for requests made by this client.
#   ## Includes connection time, any redirects, and reading the response body.
#   # client_timeout = "4s"
#
#   ## A list of nodes to gather as the rabbitmq_node measurement. If not
#   ## specified, metrics for all nodes are gathered.
#   # nodes = ["rabbit@node1", "rabbit@node2"]
#
#   ## A list of queues to gather as the rabbitmq_queue measurement. If not
#   ## specified, metrics for all queues are gathered.
#   # queues = ["telegraf"]
#
#   ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
#   ## specified, metrics for all exchanges are gathered.
#   # exchanges = ["telegraf"]
#
#   ## Queues to include and exclude. Globs accepted.
#   ## Note that an empty array for both will include all queues
#   queue_name_include = []
#   queue_name_exclude = []
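#
#   ## Example (illustrative): since globs are accepted, gather only queues
#   ## whose names start with "telegraf":
#   # queue_name_include = ["telegraf*"]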


# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
# [[inputs.raindrops]]
#   ## An array of raindrops middleware URI to gather stats.
#   urls = ["http://localhost:8080/_raindrops"]


# # Read metrics from one or many redis servers
# [[inputs.redis]]
#   ## specify servers via a url matching:
#   ##  [protocol://][:password]@address[:port]
#   ##  e.g.
#   ##    tcp://localhost:6379
#   ##    tcp://:password@192.168.99.100
#   ##    unix:///var/run/redis.sock
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no port is specified, 6379 is used
#   servers = ["tcp://localhost:6379"]
#
#   ## specify server password
#   # password = "s#cr@t%"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = true


# # Read metrics from one or many RethinkDB servers
# [[inputs.rethinkdb]]
#   ## An array of URI to gather stats about. Specify an ip or hostname
#   ## with optional port and password. ie,
#   ##   rethinkdb://user:auth_key@10.10.3.30:28105,
#   ##   rethinkdb://10.10.3.33:18832,
#   ##   10.0.0.1:10000, etc.
#   servers = ["127.0.0.1:28015"]
#   ##
#   ## If you use a rethinkdb version > 2.3.0 with username/password authorization,
#   ## the protocol has to be named "rethinkdb2" - it will use the 1_0 handshake.
#   # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
#   ##
#   ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
#   ## has to be named "rethinkdb".
#   # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]


# # Read metrics from one or many Riak servers
# [[inputs.riak]]
#   # Specify a list of one or more riak http servers
#   servers = ["http://localhost:8098"]


# # Read API usage and limits for a Salesforce organisation
# [[inputs.salesforce]]
#   ## specify your credentials
#   ##
#   username = "your_username"
#   password = "your_password"
#   ##
#   ## (optional) security token
#   # security_token = "your_security_token"
#   ##
#   ## (optional) environment type (sandbox or production)
#   ## default is: production
#   ##
#   # environment = "production"
#   ##
#   ## (optional) API version (default: "39.0")
#   ##
#   # version = "39.0"


# # Read metrics from storage devices supporting S.M.A.R.T.
# [[inputs.smart]]
#   ## Optionally specify the path to the smartctl executable
#   # path = "/usr/bin/smartctl"
#
#   ## On most platforms smartctl requires root access.
#   ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
#   ## Sudo must be configured to allow the telegraf user to run smartctl
#   ## without a password.
#   # use_sudo = false
#
#   ## Skip checking disks in this power mode. Defaults to
#   ## "standby" to not wake up disks that have stoped rotating.
#   ## See --nocheck in the man pages for smartctl.
#   ## smartctl version 5.41 and 5.42 have faulty detection of
#   ## power mode and might require changing this value to
#   ## "never" depending on your disks.
#   # nocheck = "standby"
#
#   ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
#   ## information from each drive into the 'smart_attribute' measurement.
#   # attributes = false
#
#   ## Optionally specify devices to exclude from reporting.
#   # excludes = [ "/dev/pass6" ]
#
#   ## Optionally specify devices and device type; if unset,
#   ## a scan (smartctl --scan) for S.M.A.R.T. devices will be
#   ## done and all devices found will be included except those
#   ## excluded in excludes.
#   # devices = [ "/dev/ada0 -d atacam" ]
#
#   ## Timeout for the smartctl command to complete.
#   # timeout = "30s"


# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
#   agents = [ "127.0.0.1:161" ]
#   ## Timeout for each SNMP query.
#   timeout = "5s"
#   ## Number of retries to attempt within timeout.
#   retries = 3
#   ## SNMP version, values can be 1, 2, or 3
#   version = 2
#
#   ## SNMP community string.
#   community = "public"
#
#   ## The GETBULK max-repetitions parameter
#   max_repetitions = 10
#
#   ## SNMPv3 auth parameters
#   #sec_name = "myuser"
#   #auth_protocol = "md5"      # Values: "MD5", "SHA", ""
#   #auth_password = "pass"
#   #sec_level = "authNoPriv"   # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
#   #context_name = ""
#   #priv_protocol = ""         # Values: "DES", "AES", ""
#   #priv_password = ""
#
#   ## measurement name
#   name = "system"
#   [[inputs.snmp.field]]
#     name = "hostname"
#     oid = ".1.0.0.1.1"
#   [[inputs.snmp.field]]
#     name = "uptime"
#     oid = ".1.0.0.1.2"
#   [[inputs.snmp.field]]
#     name = "load"
#     oid = ".1.0.0.1.3"
#   [[inputs.snmp.field]]
#     oid = "HOST-RESOURCES-MIB::hrMemorySize"
#
#   [[inputs.snmp.table]]
#     ## measurement name
#     name = "remote_servers"
#     inherit_tags = [ "hostname" ]
#     [[inputs.snmp.table.field]]
#       name = "server"
#       oid = ".1.0.0.0.1.0"
#       is_tag = true
#     [[inputs.snmp.table.field]]
#       name = "connections"
#       oid = ".1.0.0.0.1.1"
#     [[inputs.snmp.table.field]]
#       name = "latency"
#       oid = ".1.0.0.0.1.2"
#
#   [[inputs.snmp.table]]
#     ## auto populate table's fields using the MIB
#     oid = "HOST-RESOURCES-MIB::hrNetworkTable"


# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
# [[inputs.snmp_legacy]]
#   ## Use 'oids.txt' file to translate oids to names
#   ## To generate 'oids.txt' you need to run:
#   ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
#   ## Or if you have another MIB folder with custom MIBs
#   ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
#   snmptranslate_file = "/tmp/oids.txt"
#   [[inputs.snmp.host]]
#     address = "192.168.2.2:161"
#     # SNMP community
#     community = "public" # default public
#     # SNMP version (1, 2 or 3)
#     # Version 3 not supported yet
#     version = 2 # default 2
#     # SNMP response timeout
#     timeout = 2.0 # default 2.0
#     # SNMP request retries
#     retries = 2 # default 2
#     # Which get/bulk do you want to collect for this host
#     collect = ["mybulk", "sysservices", "sysdescr"]
#     # Simple list of OIDs to get, in addition to "collect"
#     get_oids = []
#
#   [[inputs.snmp.host]]
#     address = "192.168.2.3:161"
#     community = "public"
#     version = 2
#     timeout = 2.0
#     retries = 2
#     collect = ["mybulk"]
#     get_oids = [
#         "ifNumber",
#         ".1.3.6.1.2.1.1.3.0",
#     ]
#
#   [[inputs.snmp.get]]
#     name = "ifnumber"
#     oid = "ifNumber"
#
#   [[inputs.snmp.get]]
#     name = "interface_speed"
#     oid = "ifSpeed"
#     instance = "0"
#
#   [[inputs.snmp.get]]
#     name = "sysuptime"
#     oid = ".1.3.6.1.2.1.1.3.0"
#     unit = "second"
#
#   [[inputs.snmp.bulk]]
#     name = "mybulk"
#     max_repetition = 127
#     oid = ".1.3.6.1.2.1.1"
#
#   [[inputs.snmp.bulk]]
#     name = "ifoutoctets"
#     max_repetition = 127
#     oid = "ifOutOctets"
#
#   [[inputs.snmp.host]]
#     address = "192.168.2.13:161"
#     #address = "127.0.0.1:161"
#     community = "public"
#     version = 2
#     timeout = 2.0
#     retries = 2
#     #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
#     collect = ["sysuptime" ]
#     [[inputs.snmp.host.table]]
#       name = "iftable3"
#       include_instances = ["enp5s0", "eth1"]
#
#   # SNMP TABLEs
#   # table without mapping neither subtables
#   [[inputs.snmp.table]]
#     name = "iftable1"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#
#   # table without mapping but with subtables
#   [[inputs.snmp.table]]
#     name = "iftable2"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
#
#   # table with mapping but without subtables
#   [[inputs.snmp.table]]
#     name = "iftable3"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     # if empty, get all instances
#     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
#     # if empty, get all subtables
#
#   # table with both mapping and subtables
#   [[inputs.snmp.table]]
#     name = "iftable4"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     # if empty get all instances
#     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
#     # if empty get all subtables
#     # sub_tables do not have to be "real" subtables
#     sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]


# # Read stats from one or more Solr servers or cores
# [[inputs.solr]]
#   ## specify a list of one or more Solr servers
#   servers = ["http://localhost:8983"]
#
#   ## specify a list of one or more Solr cores (default - all)
#   # cores = ["main"]
#
#   ## Optional HTTP Basic Auth Credentials
#   # username = "username"
#   # password = "pa$$word"


# # Read metrics from Microsoft SQL Server
# [[inputs.sqlserver]]
#   ## Specify instances to monitor with a list of connection strings.
#   ## All connection parameters are optional.
#   ## By default, the host is localhost, listening on default port, TCP 1433.
#   ##   for Windows, the user is the currently running AD user (SSO).
#   ##   See https://github.com/denisenkom/go-mssqldb for detailed connection
#   ##   parameters.
#   # servers = [
#   #  "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
#   # ]
#
#   ## Optional parameter, setting this to 2 will use a new version
#   ## of the collection queries that break compatibility with the original
#   ## dashboards.
#   query_version = 2
#
#   ## If you are using AzureDB, setting this to true will gather resource utilization metrics
#   # azuredb = false
#
#   ## If you would like to exclude some of the metrics queries, list them here
#   ## Possible choices:
#   ## - PerformanceCounters
#   ## - WaitStatsCategorized
#   ## - DatabaseIO
#   ## - DatabaseProperties
#   ## - CPUHistory
#   ## - DatabaseSize
#   ## - DatabaseStats
#   ## - MemoryClerk
#   ## - VolumeSpace
#   ## - PerformanceMetrics
#   ## - Schedulers
#   ## - AzureDBResourceStats
#   ## - AzureDBResourceGovernance
#   ## - SqlRequests
#   ## - ServerProperties
#   exclude_query = [ 'Schedulers' ]


# # Gather timeseries from Google Cloud Platform v3 monitoring API
# [[inputs.stackdriver]]
#   ## GCP Project
#   project = "erudite-bloom-151019"
#
#   ## Include timeseries that start with the given metric type.
#   metric_type_prefix_include = [
#     "compute.googleapis.com/",
#   ]
#
#   ## Exclude timeseries that start with the given metric type.
#   # metric_type_prefix_exclude = []
#
#   ## Many metrics are updated once per minute; it is recommended to override
#   ## the agent level interval with a value of 1m or greater.
#   interval = "1m"
#
#   ## Maximum number of API calls to make per second.  The quota for accounts
#   ## varies; it can be viewed on the API dashboard:
#   ##   https://cloud.google.com/monitoring/quotas#quotas_and_limits
#   # rate_limit = 14
#
#   ## The delay and window options control the number of points selected on
#   ## each gather.  When set, metrics are gathered between:
#   ##   start: now() - delay - window
#   ##   end:   now() - delay
#   #
#   ## Collection delay; if set too low metrics may not yet be available.
#   # delay = "5m"
#   #
#   ## If unset, the window will start at 1m and be updated dynamically to span
#   ## the time between calls (approximately the length of the plugin interval).
#   # window = "1m"
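#   #
#   ## As an illustration of the formulas above: with delay = "5m" and window = "1m",
#   ## a gather at 12:00 selects points between 11:54 (now - delay - window)
#   ## and 11:55 (now - delay).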
#
#   ## TTL for cached list of metric types.  This is the maximum amount of time
#   ## it may take to discover new metrics.
#   # cache_ttl = "1h"
#
#   ## If true, raw bucket counts are collected for distribution value types.
#   ## For a more lightweight collection, you may wish to disable and use
#   ## distribution_aggregation_aligners instead.
#   # gather_raw_distribution_buckets = true
#
#   ## Aggregate functions to be used for metrics whose value type is
#   ## distribution.  These aggregate values are recorded in addition to raw
#   ## bucket counts, if they are enabled.
#   ##
#   ## For a list of aligner strings see:
#   ##   https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
#   # distribution_aggregation_aligners = [
#   # 	"ALIGN_PERCENTILE_99",
#   # 	"ALIGN_PERCENTILE_95",
#   # 	"ALIGN_PERCENTILE_50",
#   # ]
#
#   ## Filters can be added to reduce the number of time series matched.  All
#   ## functions are supported: starts_with, ends_with, has_substring, and
#   ## one_of.  Only the '=' operator is supported.
#   ##
#   ## The logical operators when combining filters are defined statically using
#   ## the following values:
#   ##   filter ::= <resource_labels> {AND <metric_labels>}
#   ##   resource_labels ::= <resource_labels> {OR <resource_label>}
#   ##   metric_labels ::= <metric_labels> {OR <metric_label>}
#   ##
#   ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
#   #
#   ## Resource labels refine the time series selection with the following expression:
#   ##   resource.labels.<key> = <value>
#   # [[inputs.stackdriver.filter.resource_labels]]
#   #   key = "instance_name"
#   #   value = 'starts_with("localhost")'
#   #
#   ## Metric labels refine the time series selection with the following expression:
#   ##   metric.labels.<key> = <value>
#   #  [[inputs.stackdriver.filter.metric_labels]]
#   #  	 key = "device_name"
#   #  	 value = 'one_of("sda", "sdb")'


# # Reads metrics from a Teamspeak 3 Server via ServerQuery
# [[inputs.teamspeak]]
#   ## Server address for Teamspeak 3 ServerQuery
#   # server = "127.0.0.1:10011"
#   ## Username for ServerQuery
#   username = "serverqueryuser"
#   ## Password for ServerQuery
#   password = "secret"
#   ## Array of virtual servers
#   # virtual_servers = [1]


# # Read metrics about temperature
# [[inputs.temp]]
#   # no configuration


# # Read Tengine's basic status information (ngx_http_reqstat_module)
# [[inputs.tengine]]
#   # An array of Tengine reqstat module URIs to gather stats.
#   urls = ["http://127.0.0.1/us"]
#
#   # HTTP response timeout (default: 5s)
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.cer"
#   # tls_key = "/etc/telegraf/key.key"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Gather metrics from the Tomcat server status page.
# [[inputs.tomcat]]
#   ## URL of the Tomcat server status
#   # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
#
#   ## HTTP Basic Auth Credentials
#   # username = "tomcat"
#   # password = "s3cret"
#
#   ## Request timeout
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
#   ## Set the amplitude
#   amplitude = 10.0


# # Read Twemproxy stats data
# [[inputs.twemproxy]]
#   ## Twemproxy stats address and port (no scheme)
#   addr = "localhost:22222"
#   ## Monitor pool name
#   pools = ["redis_pool", "mc_pool"]


# # A plugin to collect stats from the Unbound DNS resolver
# [[inputs.unbound]]
#   ## Address of the server to connect to (read from the unbound conf by default), optionally with ':port'
#   ## Will lookup IP if given a hostname
#   server = "127.0.0.1:8953"
#
#   ## If running as a restricted user you can prepend sudo for additional access:
#   # use_sudo = false
#
#   ## The default location of the unbound-control binary can be overridden with:
#   # binary = "/usr/sbin/unbound-control"
#
#   ## The default timeout of 1s can be overridden with:
#   # timeout = "1s"
#
#   ## When set to true, thread metrics are tagged with the thread id.
#   ##
#   ## The default is false for backwards compatibility, and will be changed to
#   ## true in a future version.  It is recommended to set to true on new
#   ## deployments.
#   thread_as_tag = false


# # Read uWSGI metrics.
# [[inputs.uwsgi]]
#   ## List of urls of uWSGI Stats servers. URLs must match the pattern:
#   ## scheme://address[:port]
#   ##
#   ## For example:
#   ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
#   servers = ["tcp://127.0.0.1:1717"]
#
#   ## General connection timeout
#   # timeout = "5s"


# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
#   ## If running as a restricted user you can prepend sudo for additional access:
#   #use_sudo = false
#
#   ## The default location of the varnishstat binary can be overridden with:
#   binary = "/usr/bin/varnishstat"
#
#   ## By default, telegraf gathers stats for 3 metric points.
#   ## Setting stats will override the defaults shown below.
#   ## Glob matching can be used, ie, stats = ["MAIN.*"]
#   ## stats may also be set to ["*"], which will collect all stats
#   stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
#
#   ## Optional name for the varnish instance (or working directory) to query
#   ## Usually appended after -n in the varnish cli
#   # instance_name = "instanceName"
#
#   ## Timeout for varnishstat command
#   # timeout = "1s"


# # Monitor wifi signal strength and quality
# [[inputs.wireless]]
#   ## Sets 'proc' directory path
#   ## If not specified, then default is /proc
#   # host_proc = "/proc"


# # Reads metrics from an SSL certificate
# [[inputs.x509_cert]]
#   ## List certificate sources
#   sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
#
#   ## Timeout for SSL connection
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"


# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
# [[inputs.zfs]]
#   ## ZFS kstat path. Ignored on FreeBSD
#   ## If not specified, then default is:
#   # kstatPath = "/proc/spl/kstat/zfs"
#
#   ## By default, telegraf gathers all zfs stats
#   ## If not specified, then default is:
#   # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
#   ## For Linux, the default is:
#   # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
#   #   "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
#   ## By default, don't gather zpool stats
#   # poolMetrics = false


# # Reads 'mntr' stats from one or many zookeeper servers
# [[inputs.zookeeper]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
#
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no port is specified, 2181 is used
#   servers = [":2181"]
#
#   ## Timeout for metric collections from all servers.  Minimum timeout is "1s".
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # enable_tls = true
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## If true, skip chain & host verification
#   # insecure_skip_verify = true


###############################################################################
#                            SERVICE INPUT PLUGINS                            #
###############################################################################


# # AMQP consumer plugin
# [[inputs.amqp_consumer]]
#   ## Broker to consume from.
#   ##   deprecated in 1.7; use the brokers option
#   # url = "amqp://localhost:5672/influxdb"
#
#   ## Brokers to consume from.  If multiple brokers are specified a random broker
#   ## will be selected anytime a connection is established.  This can be
#   ## helpful for load balancing when not using a dedicated load balancer.
#   brokers = ["amqp://localhost:5672/influxdb"]
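#   ##   ex, balancing across two brokers (hostnames here are placeholders):
#   ##     brokers = ["amqp://rabbit-1:5672/influxdb", "amqp://rabbit-2:5672/influxdb"]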
#
#   ## Authentication credentials for the PLAIN auth_method.
#   # username = ""
#   # password = ""
#
#   ## Name of the exchange to declare.  If unset, no exchange will be declared.
#   exchange = "telegraf"
#
#   ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
#   # exchange_type = "topic"
#
#   ## If true, exchange will be passively declared.
#   # exchange_passive = false
#
#   ## Exchange durability can be either "transient" or "durable".
#   # exchange_durability = "durable"
#
#   ## Additional exchange arguments.
#   # exchange_arguments = { }
#   # exchange_arguments = {"hash_property" = "timestamp"}
#
#   ## AMQP queue name.
#   queue = "telegraf"
#
#   ## AMQP queue durability can be "transient" or "durable".
#   queue_durability = "durable"
#
#   ## If true, queue will be passively declared.
#   # queue_passive = false
#
#   ## A binding between the exchange and queue using this binding key is
#   ## created.  If unset, no binding is created.
#   binding_key = "#"
#
#   ## Maximum number of messages the server should give to the worker.
#   # prefetch_count = 50
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
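#   ##   ex, for the scenario described above: max_undelivered_messages = 100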
#
#   ## Auth method. PLAIN and EXTERNAL are supported
#   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
#   ## described here: https://www.rabbitmq.com/plugins.html
#   # auth_method = "PLAIN"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Content encoding for message payloads; can be set to "gzip", or to
#   ## "identity" to apply no encoding.
#   # content_encoding = "identity"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
#   ## DEPRECATED: The cassandra plugin has been deprecated.  Please use the
#   ## jolokia2 plugin instead.
#   ##
#   ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
#
#   context = "/jolokia/read"
#   ## List of cassandra servers exposing jolokia read service
#   servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
#   ## List of metrics collected on above servers
#   ## Each metric consists of a jmx path.
#   ## This will collect all heap memory usage metrics from the jvm and
#   ## ReadLatency metrics for all keyspaces and tables.
#   ## "type=Table" in the query works with Cassandra3.0. Older versions might
#   ## need to use "type=ColumnFamily"
#   metrics  = [
#     "/java.lang:type=Memory/HeapMemoryUsage",
#     "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
#   ]


# # Cisco GNMI telemetry input plugin based on GNMI telemetry data produced in IOS XR
# [[inputs.cisco_telemetry_gnmi]]
#  ## Address and port of the GNMI GRPC server
#  addresses = ["10.49.234.114:57777"]
#
#  ## define credentials
#  username = "cisco"
#  password = "cisco"
#
#  ## GNMI encoding requested (one of: "proto", "json", "json_ietf")
#  # encoding = "proto"
#
#  ## redial in case of failures after this delay
#  redial = "10s"
#
#  ## enable client-side TLS and define CA to authenticate the device
#  # enable_tls = true
#  # tls_ca = "/etc/telegraf/ca.pem"
#  # insecure_skip_verify = true
#
#  ## define client-side TLS certificate & key to authenticate to the device
#  # tls_cert = "/etc/telegraf/cert.pem"
#  # tls_key = "/etc/telegraf/key.pem"
#
#  ## GNMI subscription prefix (optional, can usually be left empty)
#  ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
#  # origin = ""
#  # prefix = ""
#  # target = ""
#
#  ## Define additional aliases to map telemetry encoding paths to simple measurement names
#  #[inputs.cisco_telemetry_gnmi.aliases]
#  #  ifcounters = "openconfig:/interfaces/interface/state/counters"
#
#  [[inputs.cisco_telemetry_gnmi.subscription]]
#   ## Name of the measurement that will be emitted
#   name = "ifcounters"
#
#   ## Origin and path of the subscription
#   ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
#   ##
#   ## origin usually refers to a (YANG) data model implemented by the device
#   ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
#   ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
#   origin = "openconfig-interfaces"
#   path = "/interfaces/interface/state/counters"
#
#   # Subscription mode (one of: "target_defined", "sample", "on_change") and interval
#   subscription_mode = "sample"
#   sample_interval = "10s"
#
#   ## Suppress redundant transmissions when measured values are unchanged
#   # suppress_redundant = false
#
#   ## If suppression is enabled, send updates at least every X seconds anyway
#   # heartbeat_interval = "60s"


# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms
# [[inputs.cisco_telemetry_mdt]]
#  ## Telemetry transport can be "tcp" or "grpc".  TLS is only supported when
#  ## using the grpc transport.
#  transport = "grpc"
#
#  ## Address and port to host telemetry listener
#  service_address = ":57000"
#
#  ## Enable TLS; grpc transport only.
#  # tls_cert = "/etc/telegraf/cert.pem"
#  # tls_key = "/etc/telegraf/key.pem"
#
#  ## Enable TLS client authentication and define allowed CA certificates; grpc
#  ##  transport only.
#  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#  ## Define aliases to map telemetry encoding paths to simple measurement names
#  [inputs.cisco_telemetry_mdt.aliases]
#    ifstats = "ietf-interfaces:interfaces-state/interface/statistics"


# # Read metrics from Google PubSub
# [[inputs.cloud_pubsub]]
#   ## Required. Name of Google Cloud Platform (GCP) Project that owns
#   ## the given PubSub subscription.
#   project = "my-project"
#
#   ## Required. Name of PubSub subscription to ingest metrics from.
#   subscription = "my-subscription"
#
#   ## Required. Data format to consume.
#   ## Each data format has its own unique set of configuration options.
#   ## Read more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
#
#   ## Optional. Filepath for GCP credentials JSON file to authorize calls to
#   ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
#   ## Application Default Credentials, which is preferred.
#   # credentials_file = "path/to/my/creds.json"
#
#   ## Optional. Number of seconds to wait before attempting to restart the
#   ## PubSub subscription receiver after an unexpected error.
#   ## If the streaming pull for a PubSub Subscription fails (receiver),
#   ## the agent attempts to restart receiving messages after this many seconds.
#   # retry_delay_seconds = 5
#
#   ## Optional. Maximum byte length of a message to consume.
#   ## Larger messages are dropped with an error. If less than 0 or unspecified,
#   ## treated as no limit.
#   # max_message_len = 1000000
#
#   ## Optional. Maximum messages to read from PubSub that have not been written
#   ## to an output. Defaults to 1000.
#   ## For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message contains 10 metrics and the output
#   ## metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## The following are optional Subscription ReceiveSettings in PubSub.
#   ## Read more about these values:
#   ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
#
#   ## Optional. Maximum number of seconds for which a PubSub subscription
#   ## should auto-extend the PubSub ACK deadline for each message. If less than
#   ## 0, auto-extension is disabled.
#   # max_extension = 0
#
#   ## Optional. Maximum number of unprocessed messages in PubSub
#   ## (unacknowledged but not yet expired in PubSub).
#   ## A value of 0 is treated as the default PubSub value.
#   ## Negative values will be treated as unlimited.
#   # max_outstanding_messages = 0
#
#   ## Optional. Maximum size in bytes of unprocessed messages in PubSub
#   ## (unacknowledged but not yet expired in PubSub).
#   ## A value of 0 is treated as the default PubSub value.
#   ## Negative values will be treated as unlimited.
#   # max_outstanding_bytes = 0
#
#   ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
#   ## to pull messages from PubSub concurrently. This limit applies to each
#   ## subscription separately and is treated as the PubSub default if less than
#   ## 1. Note this setting does not limit the number of messages that can be
#   ## processed concurrently (use "max_outstanding_messages" instead).
#   # max_receiver_go_routines = 0
#
#   ## Optional. If true, Telegraf will attempt to base64 decode the
#   ## PubSub message data before parsing
#   # base64_data = false


# # Google Cloud Pub/Sub Push HTTP listener
# [[inputs.cloud_pubsub_push]]
#   ## Address and port to host HTTP listener on
#   service_address = ":8080"
#
#   ## Application secret to verify messages originate from Cloud Pub/Sub
#   # token = ""
#
#   ## Path to listen to.
#   # path = "/"
#
#   ## Maximum duration before timing out read of the request
#   # read_timeout = "10s"
#   ## Maximum duration before timing out write of the response. This should be set to a value
#   ## large enough that you can send at least 'metric_batch_size' number of messages within the
#   ## duration.
#   # write_timeout = "10s"
#
#   ## Maximum allowed http request body size in bytes.
#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
#   # max_body_size = "500MB"
#
#   ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
#   # add_meta = false
#
#   ## Optional. Maximum messages to read from PubSub that have not been written
#   ## to an output. Defaults to 1000.
#   ## For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message contains 10 metrics and the output
#   ## metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Set one or more allowed client CA certificate file names to
#   ## enable mutually authenticated TLS connections
#   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Add service certificate and key
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read logging output from the Docker engine
# [[inputs.docker_log]]
#   ## Docker Endpoint
#   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
#   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
#   # endpoint = "unix:///var/run/docker.sock"
#
#   ## When true, container logs are read from the beginning; otherwise
#   ## reading begins at the end of the log.
#   # from_beginning = false
#
#   ## Timeout for Docker API calls.
#   # timeout = "5s"
#
#   ## Containers to include and exclude. Globs accepted.
#   ## Note that an empty array for both will include all containers
#   # container_name_include = []
#   # container_name_exclude = []
#
#   ## Container states to include and exclude. Globs accepted.
#   ## When empty, only containers in the "running" state will be captured.
#   # container_state_include = []
#   # container_state_exclude = []
#
#   ## docker labels to include and exclude as tags.  Globs accepted.
#   ## Note that an empty array for both will include all labels as tags
#   # docker_label_include = []
#   # docker_label_exclude = []
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Influx HTTP write listener
# [[inputs.http_listener]]
#   ## Address and port to host HTTP listener on
#   service_address = ":8186"
#
#   ## maximum duration before timing out read of the request
#   read_timeout = "10s"
#   ## maximum duration before timing out write of the response
#   write_timeout = "10s"
#
#   ## Maximum allowed http request body size in bytes.
#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
#   max_body_size = "500MiB"
#
#   ## Maximum line size allowed to be sent in bytes.
#   ## 0 means to use the default of 65536 bytes (64 kibibytes)
#   max_line_size = "64KiB"
#
#
#   ## Optional tag name used to store the database.
#   ## If the write has a database in the query string then it will be kept in this tag name.
#   ## This tag can be used in downstream outputs.
#   ## If left empty (the default), this is disabled and the database will not be recorded.
#   # database_tag = ""
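#   ##   ex (any tag name may be used): database_tag = "database"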
#
#   ## Set one or more allowed client CA certificate file names to
#   ## enable mutually authenticated TLS connections
#   tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Add service certificate and key
#   tls_cert = "/etc/telegraf/cert.pem"
#   tls_key = "/etc/telegraf/key.pem"
#
#   ## Optional username and password to accept for HTTP basic authentication.
#   ## You probably want to make sure you have TLS configured above for this.
#   # basic_username = "foobar"
#   # basic_password = "barfoo"


# # Generic HTTP write listener
# [[inputs.http_listener_v2]]
#   ## Address and port to host HTTP listener on
#   service_address = ":8080"
#
#   ## Path to listen to.
#   # path = "/telegraf"
#
#   ## HTTP methods to accept.
#   # methods = ["POST", "PUT"]
#
#   ## maximum duration before timing out read of the request
#   # read_timeout = "10s"
#   ## maximum duration before timing out write of the response
#   # write_timeout = "10s"
#
#   ## Maximum allowed http request body size in bytes.
#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
#   # max_body_size = "500MB"
#
#   ## Part of the request to consume.  Available options are "body" and
#   ## "query".
#   # data_source = "body"
#
#   ## Set one or more allowed client CA certificate file names to
#   ## enable mutually authenticated TLS connections
#   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Add service certificate and key
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#
#   ## Optional username and password to accept for HTTP basic authentication.
#   ## You probably want to make sure you have TLS configured above for this.
#   # basic_username = "foobar"
#   # basic_password = "barfoo"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Influx HTTP write listener
# [[inputs.influxdb_listener]]
#   ## Address and port to host HTTP listener on
#   service_address = ":8186"
#
#   ## maximum duration before timing out read of the request
#   read_timeout = "10s"
#   ## maximum duration before timing out write of the response
#   write_timeout = "10s"
#
#   ## Maximum allowed http request body size in bytes.
#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
#   max_body_size = "500MiB"
#
#   ## Maximum line size allowed to be sent in bytes.
#   ## 0 means to use the default of 65536 bytes (64 kibibytes)
#   max_line_size = "64KiB"
#
#
#   ## Optional tag name used to store the database.
#   ## If the write has a database in the query string then it will be kept in this tag name.
#   ## This tag can be used in downstream outputs.
#   ## If left empty (the default), this is disabled and the database will not be recorded.
#   # database_tag = ""
#
#   ## Set one or more allowed client CA certificate file names to
#   ## enable mutually authenticated TLS connections
#   tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Add service certificate and key
#   tls_cert = "/etc/telegraf/cert.pem"
#   tls_key = "/etc/telegraf/key.pem"
#
#   ## Optional username and password to accept for HTTP basic authentication.
#   ## You probably want to make sure you have TLS configured above for this.
#   # basic_username = "foobar"
#   # basic_password = "barfoo"


# # Read JTI OpenConfig Telemetry from listed sensors
# [[inputs.jti_openconfig_telemetry]]
#   ## List of device addresses to collect telemetry from
#   servers = ["localhost:1883"]
#
#   ## Authentication details. Username and password are required if the device
#   ## expects authentication. Client ID must be unique when connecting from
#   ## multiple instances of telegraf to the same device
#   username = "user"
#   password = "pass"
#   client_id = "telegraf"
#
#   ## Frequency to get data
#   sample_frequency = "1000ms"
#
#   ## Sensors to subscribe for
#   ## An identifier for each sensor can be provided in the path by separating it with a space;
#   ## otherwise the sensor path will be used as the identifier.
#   ## When an identifier is used, a list of space-separated sensors can be provided.
#   ## A single subscription will be created with all of these sensors, and data will
#   ## be saved to a measurement with the identifier name
#   sensors = [
#    "/interfaces/",
#    "collection /components/ /lldp",
#   ]
#
#   ## A sensor-group-level reporting rate can be specified by placing the
#   ## reporting rate (as a duration) at the beginning of the sensor path / collection
#   ## name. Entries without a reporting rate use the configured sample frequency
#   sensors = [
#    "1000ms customReporting /interfaces /lldp",
#    "2000ms collection /components",
#    "/interfaces",
#   ]
#
#   ## Optional TLS Config
#   # enable_tls = true
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
#   ## Failed streams/calls will not be retried if 0 is provided
#   retry_delay = "1000ms"
#
#   ## To treat all string values as tags, set this to true
#   str_as_tags = false


# # Read metrics from Kafka topics
# [[inputs.kafka_consumer]]
#   ## Kafka brokers.
#   brokers = ["localhost:9092"]
#
#   ## Topics to consume.
#   topics = ["telegraf"]
#
#   ## When set, this tag will be added to all metrics, with the topic as the value.
#   # topic_tag = ""
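#   ##   ex (any tag name may be used): topic_tag = "topic"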
#
#   ## Optional Client id
#   # client_id = "Telegraf"
#
#   ## Set the minimal supported Kafka version.  Setting this enables the use of new
#   ## Kafka features and APIs.  Must be 0.10.2.0 or greater.
#   ##   ex: version = "1.1.0"
#   # version = ""
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional SASL Config
#   # sasl_username = "kafka"
#   # sasl_password = "secret"
#
#   ## Name of the consumer group.
#   # consumer_group = "telegraf_metrics_consumers"
#
#   ## Initial offset position; one of "oldest" or "newest".
#   # offset = "oldest"
#
#   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
#   ## larger messages are dropped
#   max_message_len = 1000000
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer_legacy]]
#   ## topic(s) to consume
#   topics = ["telegraf"]
#   ## an array of Zookeeper connection strings
#   zookeeper_peers = ["localhost:2181"]
#   ## Zookeeper Chroot
#   zookeeper_chroot = ""
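#   ##   ex (hypothetical chroot path): zookeeper_chroot = "/kafka"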
#   ## the name of the consumer group
#   consumer_group = "telegraf_metrics_consumers"
#   ## Offset (must be either "oldest" or "newest")
#   offset = "oldest"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
#
#   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
#   ## larger messages are dropped
#   max_message_len = 65536


# # Configuration for the AWS Kinesis input.
# [[inputs.kinesis_consumer]]
#   ## Amazon REGION of kinesis endpoint.
#   region = "ap-southeast-2"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   # access_key = ""
#   # secret_key = ""
#   # token = ""
#   # role_arn = ""
#   # profile = ""
#   # shared_credential_file = ""
#
#   ## Endpoint to make request against, the correct endpoint is automatically
#   ## determined and this option should only be set if you wish to override the
#   ## default.
#   ##   ex: endpoint_url = "http://localhost:8000"
#   # endpoint_url = ""
#
#   ## Kinesis StreamName must exist prior to starting telegraf.
#   streamname = "StreamName"
#
#   ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported)
#   # shard_iterator_type = "TRIM_HORIZON"
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
#
#   ## Optional
#   ## Configuration for a dynamodb checkpoint
#   [inputs.kinesis_consumer.checkpoint_dynamodb]
#     ## unique name for this consumer
#     app_name = "default"
#     table_name = "default"


# # Stream and parse log file(s).
# [[inputs.logparser]]
#   ## Log files to parse.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   /var/log/**.log     -> recursively find all .log files in /var/log
#   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
#   ##   /var/log/apache.log -> only tail the apache log file
#   files = ["/var/log/apache/access.log"]
#
#   ## Read files that currently exist from the beginning. Files that are created
#   ## while telegraf is running (and that match the "files" globs) will always
#   ## be read from the beginning.
#   from_beginning = false
#
#   ## Method used to watch for file updates.  Can be either "inotify" or "poll".
#   # watch_method = "inotify"
#
#   ## Parse logstash-style "grok" patterns:
#   [inputs.logparser.grok]
#     ## This is a list of patterns to check the given log file(s) for.
#     ## Note that adding patterns here increases processing time. The most
#     ## efficient configuration is to have one pattern per logparser.
#     ## Other common built-in patterns are:
#     ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
#     ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
#     patterns = ["%{COMBINED_LOG_FORMAT}"]
#
#     ## Name of the output measurement.
#     measurement = "apache_access_log"
#
#     ## Full path(s) to custom pattern files.
#     custom_pattern_files = []
#
#     ## Custom patterns can also be defined here. Put one pattern per line.
#     custom_patterns = '''
#     '''
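#     ## For example, a custom pattern line could look like (hypothetical):
#     ##   POSTFIX_QUEUEID [0-9A-F]{10,11}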
#
#     ## Timezone allows you to provide an override for timestamps that
#     ## don't already include an offset
#     ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
#     ##
#     ## Default: "" which renders UTC
#     ## Options are as follows:
#     ##   1. Local             -- interpret based on machine localtime
#     ##   2. "Canada/Eastern"  -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
#     ##   3. UTC               -- or blank/unspecified, will return timestamp in UTC
#     # timezone = "Canada/Eastern"
#
#     ## When set to "disable", the timestamp will not be incremented if there is a
#     ## duplicate.
#     # unique_timestamp = "auto"


# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
#   ## MQTT broker URLs to be used. The format should be scheme://host:port,
#   ## scheme can be tcp, ssl, or ws.
#   servers = ["tcp://127.0.0.1:1883"]
#
#   ## Topics that will be subscribed to.
#   topics = [
#     "telegraf/host01/cpu",
#     "telegraf/+/mem",
#     "sensors/#",
#   ]
#
#   ## The message topic will be stored in a tag specified by this value.  If set
#   ## to the empty string no topic tag will be created.
#   # topic_tag = "topic"
#
#   ## QoS policy for messages
#   ##   0 = at most once
#   ##   1 = at least once
#   ##   2 = exactly once
#   ##
#   ## When using a QoS of 1 or 2, you should enable persistent_session to allow
#   ## resuming unacknowledged messages.
#   # qos = 0
#
#   ## Connection timeout for initial connection in seconds
#   # connection_timeout = "30s"
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Persistent session disables clearing of the client session on connection.
#   ## In order for this option to work you must also set client_id to identify
#   ## the client.  To receive messages that arrived while the client is offline,
#   ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
#   ## publishing.
#   # persistent_session = false
#
#   ## If unset, a random client ID will be generated.
#   # client_id = ""
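#   ##   ex (any value unique per client): client_id = "telegraf-host01"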
#
#   ## Username and password to connect MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
#   ## urls of NATS servers
#   servers = ["nats://localhost:4222"]
#
#   ## subject(s) to consume
#   subjects = ["telegraf"]
#   ## name a queue group
#   queue_group = "telegraf_consumers"
#
#   ## Optional credentials
#   # username = ""
#   # password = ""
#
#   ## Use Transport Layer Security
#   # secure = false
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Sets the limits for pending msgs and bytes for each subscription
#   ## These shouldn't need to be adjusted except in very high throughput scenarios
#   # pending_message_limit = 65536
#   # pending_bytes_limit = 67108864
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
#   ## The server option still works but is deprecated; it is simply prepended to the nsqd array.
#   # server = "localhost:4150"
#   ## An array representing the NSQD TCP Endpoints
#   nsqd = ["localhost:4150"]
#   ## An array representing the NSQLookupd HTTP Endpoints
#   nsqlookupd = ["localhost:4161"]
#   topic = "telegraf"
#   channel = "consumer"
#   max_in_flight = 100
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from one or many pgbouncer servers
# [[inputs.pgbouncer]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   ##
#   ## All connection parameters are optional.
#   ##
#   address = "host=localhost user=pgbouncer sslmode=disable"
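#   ##
#   ## url-form example (hypothetical credentials; pgbouncer's admin console
#   ## typically listens on port 6432):
#   ##   address = "postgres://pgbouncer:secret@localhost:6432/pgbouncer?sslmode=disable"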


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   ##
#   ## All connection parameters are optional.
#   ##
#   ## Without the dbname parameter, the driver will default to a database
#   ## with the same name as the user. This dbname is just for instantiating a
#   ## connection with the server and doesn't restrict the databases we are trying
#   ## to grab metrics for.
#   ##
#   address = "host=localhost user=postgres sslmode=disable"
#   ## A custom name for the database that will be used as the "server" tag in the
#   ## measurement output. If not specified, a default one generated from
#   ## the connection address is used.
#   # outputaddress = "db01"
#
#   ## connection configuration.
#   ## maxlifetime - specify the maximum lifetime of a connection.
#   ## default is forever (0s)
#   max_lifetime = "0s"
#
#   ## A list of databases to explicitly ignore.  If not specified, metrics for all
#   ## databases are gathered.  Do NOT use with the 'databases' option.
#   # ignored_databases = ["postgres", "template0", "template1"]
#
#   ## A list of databases to pull metrics about. If not specified, metrics for all
#   ## databases are gathered.  Do NOT use with the 'ignored_databases' option.
#   # databases = ["app_production", "testing"]


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   #
#   ## All connection parameters are optional.
#   ## Without the dbname parameter, the driver will default to a database
#   ## with the same name as the user. This dbname is just for instantiating a
#   ## connection with the server and doesn't restrict the databases we are trying
#   ## to grab metrics for.
#   #
#   address = "host=localhost user=postgres sslmode=disable"
#
#   ## connection configuration.
#   ## maxlifetime - specify the maximum lifetime of a connection.
#   ## default is forever (0s)
#   max_lifetime = "0s"
#
#   ## A list of databases to pull metrics about. If not specified, metrics for all
#   ## databases are gathered.
#   ## databases = ["app_production", "testing"]
#   #
#   ## A custom name for the database that will be used as the "server" tag in the
#   ## measurement output. If not specified, a default one generated from
#   ## the connection address is used.
#   # outputaddress = "db01"
#   #
#   ## Define the toml config where the sql queries are stored
#   ## New queries can be added. If withdbname is set to true and no databases
#   ## are defined in the 'databases' field, the sql query is ended with
#   ## 'is not null' in order to make the query succeed.
#   ## Example:
#   ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
#   ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
#   ## because the databases variable was set to ['postgres', 'pgbench'] and
#   ## withdbname was true. Note that if withdbname is set to false you must not
#   ## define the where clause (i.e. with the dbname); the tagvalue
#   ## field is used to define custom tags (separated by commas)
#   ## The optional "measurement" value can be used to override the default
#   ## output measurement name ("postgresql").
#   #
#   ## Structure :
#   ## [[inputs.postgresql_extensible.query]]
#   ##   sqlquery string
#   ##   version string
#   ##   withdbname boolean
#   ##   tagvalue string (comma separated)
#   ##   measurement string
#   [[inputs.postgresql_extensible.query]]
#     sqlquery="SELECT * FROM pg_stat_database"
#     version=901
#     withdbname=false
#     tagvalue=""
#     measurement=""
#   [[inputs.postgresql_extensible.query]]
#     sqlquery="SELECT * FROM pg_stat_bgwriter"
#     version=901
#     withdbname=false
#     tagvalue="postgresql.stats"


# # Read metrics from one or many prometheus clients
# [[inputs.prometheus]]
#   ## An array of urls to scrape metrics from.
#   urls = ["http://localhost:9100/metrics"]
#
#   ## An array of Kubernetes services to scrape metrics from.
#   # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
#
#   ## Kubernetes config file to create client from.
#   # kube_config = "/path/to/kubernetes.config"
#
#   ## Scrape Kubernetes pods for the following prometheus annotations:
#   ## - prometheus.io/scrape: Enable scraping for this pod
#   ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
#   ##     set this to 'https' & most likely set the tls config.
#   ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
#   ## - prometheus.io/port: If port is not 9102 use this annotation
#   # monitor_kubernetes_pods = true
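#   ##   ex, annotations on a scraped pod (values shown are illustrative):
#   ##     prometheus.io/scrape: "true"
#   ##     prometheus.io/path: "/metrics"
#   ##     prometheus.io/port: "9273"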
#   ## Restricts Kubernetes monitoring to a single namespace
#   ##   ex: monitor_kubernetes_pods_namespace = "default"
#   # monitor_kubernetes_pods_namespace = ""
#
#   ## Use bearer token for authorization. ('bearer_token' takes priority)
#   # bearer_token = "/path/to/bearer/token"
#   ## OR
#   # bearer_token_string = "abc_123"
#
#   ## HTTP Basic Authentication username and password. ('bearer_token' and
#   ## 'bearer_token_string' take priority)
#   # username = ""
#   # password = ""
#
#   ## Specify timeout duration for slower prometheus clients (default is 3s)
#   # response_timeout = "3s"
#
#   ## Optional TLS Config
#   # tls_ca = "/path/to/cafile"
#   # tls_cert = "/path/to/certfile"
#   # tls_key = "/path/to/keyfile"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
#   ## URL to listen on
#   # service_address = "tcp://:8094"
#   # service_address = "tcp://127.0.0.1:http"
#   # service_address = "tcp4://:8094"
#   # service_address = "tcp6://:8094"
#   # service_address = "tcp6://[2001:db8::1]:8094"
#   # service_address = "udp://:8094"
#   # service_address = "udp4://:8094"
#   # service_address = "udp6://:8094"
#   # service_address = "unix:///tmp/telegraf.sock"
#   # service_address = "unixgram:///tmp/telegraf.sock"
#
#   ## Change the file mode bits on unix sockets.  These permissions may not be
#   ## respected by some platforms; to safely restrict write permissions it is best
#   ## to place the socket into a directory that has previously been created
#   ## with the desired permissions.
#   ##   ex: socket_mode = "777"
#   # socket_mode = ""
#
#   ## Maximum number of concurrent connections.
#   ## Only applies to stream sockets (e.g. TCP).
#   ## 0 (default) is unlimited.
#   # max_connections = 1024
#
#   ## Read timeout.
#   ## Only applies to stream sockets (e.g. TCP).
#   ## 0 (default) is unlimited.
#   # read_timeout = "30s"
#
#   ## Optional TLS configuration.
#   ## Only applies to stream sockets (e.g. TCP).
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key  = "/etc/telegraf/key.pem"
#   ## Enables client authentication if set.
#   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Maximum socket buffer size (in bytes when no unit specified).
#   ## For stream sockets, once the buffer fills up, the sender will start backing up.
#   ## For datagram sockets, once the buffer fills up, metrics will start dropping.
#   ## Defaults to the OS default.
#   # read_buffer_size = "64KiB"
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"


# # Statsd UDP/TCP Server
# [[inputs.statsd]]
#   ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
#   protocol = "udp"
#
#   ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
#   max_tcp_connections = 250
#
#   ## Enable TCP keep alive probes (default=false)
#   tcp_keep_alive = false
#
#   ## Specifies the keep-alive period for an active network connection.
#   ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
#   ## Defaults to the OS configuration.
#   # tcp_keep_alive_period = "2h"
#
#   ## Address and port to host UDP listener on
#   service_address = ":8125"
#
#   ## The following configuration options control when telegraf clears its cache
#   ## of previous values. If set to false, then telegraf will only clear its
#   ## cache when the daemon is restarted.
#   ## Reset gauges every interval (default=true)
#   delete_gauges = true
#   ## Reset counters every interval (default=true)
#   delete_counters = true
#   ## Reset sets every interval (default=true)
#   delete_sets = true
#   ## Reset timings & histograms every interval (default=true)
#   delete_timings = true
#
#   ## Percentiles to calculate for timing & histogram stats
#   percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
#
#   ## separator to use between elements of a statsd metric
#   metric_separator = "_"
#
#   ## Parses tags in the datadog statsd format
#   ## http://docs.datadoghq.com/guides/dogstatsd/
#   parse_data_dog_tags = false
#
#   ## Parses datadog extensions to the statsd format
#   datadog_extensions = false
#
#   ## Statsd data translation templates, more info can be read here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
#   # templates = [
#   #     "cpu.* measurement*"
#   # ]
#
#   ## Number of UDP messages allowed to queue up; once filled,
#   ## the statsd server will start dropping packets
#   allowed_pending_messages = 10000
#
#   ## Number of timing/histogram values to track per-measurement in the
#   ## calculation of percentiles. Raising this limit increases the accuracy
#   ## of percentiles but also increases the memory usage and cpu time.
#   percentile_limit = 1000


# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
# [[inputs.syslog]]
#   ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
#   ## Protocol, address and port to host the syslog receiver.
#   ## If no host is specified, then localhost is used.
#   ## If no port is specified, 6514 is used (RFC5425#section-4.1).
#   server = "tcp://:6514"
#
#   ## TLS Config
#   # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#
#   ## Period between keep alive probes.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   ## Only applies to stream sockets (e.g. TCP).
#   # keep_alive_period = "5m"
#
#   ## Maximum number of concurrent connections (default = 0).
#   ## 0 means unlimited.
#   ## Only applies to stream sockets (e.g. TCP).
#   # max_connections = 1024
#
#   ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
#   ## 0 means unlimited.
#   # read_timeout = "5s"
#
#   ## The framing technique with which messages are expected to be transported (default = "octet-counting").
#   ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
#   ## or the non-transparent framing technique (RFC6587#section-3.4.2).
#   ## Must be one of "octet-counting", "non-transparent".
#   # framing = "octet-counting"
#
#   ## The trailer to be expected in case of non-transparent framing (default = "LF").
#   ## Must be one of "LF", or "NUL".
#   # trailer = "LF"
#
#   ## Whether to parse in best effort mode or not (default = false).
#   ## By default best effort parsing is off.
#   # best_effort = false
#
#   ## Character to prepend to SD-PARAMs (default = "_").
#   ## A syslog message can contain multiple parameters and multiple identifiers within the structured data section.
#   ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
#   ## For each combination a field is created.
#   ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
#   # sdparam_separator = "_"


# # Stream a log file, like the tail -f command
# [[inputs.tail]]
#   ## files to tail.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
#   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
#   ##   "/var/log/apache.log" -> just tail the apache log file
#   ##
#   ## See https://github.com/gobwas/glob for more examples
#   ##
#   files = ["/var/mymetrics.out"]
#   ## Read file from beginning.
#   from_beginning = false
#   ## Whether file is a named pipe
#   pipe = false
#
#   ## Method used to watch for file updates.  Can be either "inotify" or "poll".
#   # watch_method = "inotify"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
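#   ## For illustration, a line in "influx" (line protocol) format that could be parsed from
#   ## the tailed file (measurement, tags, fields, and an optional nanosecond timestamp):
#   ##   cpu,host=server01 usage_idle=99.1 1465839830100400200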


# # Generic TCP listener
# [[inputs.tcp_listener]]
#   # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
#   # socket_listener plugin
#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # Generic UDP listener
# [[inputs.udp_listener]]
#   # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
#   # socket_listener plugin
#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # Read metrics from VMware vCenter
# [[inputs.vsphere]]
#   ## List of vCenter URLs to be monitored. These three lines must be uncommented
#   ## and edited for the plugin to work.
#   vcenters = [ "https://vcenter.local/sdk" ]
#   username = "user@corp.local"
#   password = "secret"
#
#   ## VMs
#   ## Typical VM metrics (if omitted or empty, all metrics are collected)
#   vm_metric_include = [
#     "cpu.demand.average",
#     "cpu.idle.summation",
#     "cpu.latency.average",
#     "cpu.readiness.average",
#     "cpu.ready.summation",
#     "cpu.run.summation",
#     "cpu.usagemhz.average",
#     "cpu.used.summation",
#     "cpu.wait.summation",
#     "mem.active.average",
#     "mem.granted.average",
#     "mem.latency.average",
#     "mem.swapin.average",
#     "mem.swapinRate.average",
#     "mem.swapout.average",
#     "mem.swapoutRate.average",
#     "mem.usage.average",
#     "mem.vmmemctl.average",
#     "net.bytesRx.average",
#     "net.bytesTx.average",
#     "net.droppedRx.summation",
#     "net.droppedTx.summation",
#     "net.usage.average",
#     "power.power.average",
#     "virtualDisk.numberReadAveraged.average",
#     "virtualDisk.numberWriteAveraged.average",
#     "virtualDisk.read.average",
#     "virtualDisk.readOIO.latest",
#     "virtualDisk.throughput.usage.average",
#     "virtualDisk.totalReadLatency.average",
#     "virtualDisk.totalWriteLatency.average",
#     "virtualDisk.write.average",
#     "virtualDisk.writeOIO.latest",
#     "sys.uptime.latest",
#   ]
#   # vm_metric_exclude = [] ## Nothing is excluded by default
#   # vm_instances = true ## true by default
#
#   ## Hosts
#   ## Typical host metrics (if omitted or empty, all metrics are collected)
#   host_metric_include = [
#     "cpu.coreUtilization.average",
#     "cpu.costop.summation",
#     "cpu.demand.average",
#     "cpu.idle.summation",
#     "cpu.latency.average",
#     "cpu.readiness.average",
#     "cpu.ready.summation",
#     "cpu.swapwait.summation",
#     "cpu.usage.average",
#     "cpu.usagemhz.average",
#     "cpu.used.summation",
#     "cpu.utilization.average",
#     "cpu.wait.summation",
#     "disk.deviceReadLatency.average",
#     "disk.deviceWriteLatency.average",
#     "disk.kernelReadLatency.average",
#     "disk.kernelWriteLatency.average",
#     "disk.numberReadAveraged.average",
#     "disk.numberWriteAveraged.average",
#     "disk.read.average",
#     "disk.totalReadLatency.average",
#     "disk.totalWriteLatency.average",
#     "disk.write.average",
#     "mem.active.average",
#     "mem.latency.average",
#     "mem.state.latest",
#     "mem.swapin.average",
#     "mem.swapinRate.average",
#     "mem.swapout.average",
#     "mem.swapoutRate.average",
#     "mem.totalCapacity.average",
#     "mem.usage.average",
#     "mem.vmmemctl.average",
#     "net.bytesRx.average",
#     "net.bytesTx.average",
#     "net.droppedRx.summation",
#     "net.droppedTx.summation",
#     "net.errorsRx.summation",
#     "net.errorsTx.summation",
#     "net.usage.average",
#     "power.power.average",
#     "storageAdapter.numberReadAveraged.average",
#     "storageAdapter.numberWriteAveraged.average",
#     "storageAdapter.read.average",
#     "storageAdapter.write.average",
#     "sys.uptime.latest",
#   ]
#   ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
#   # ip_addresses = ["ipv6", "ipv4" ]
#   # host_metric_exclude = [] ## Nothing excluded by default
#   # host_instances = true ## true by default
#
#   ## Clusters
#   # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
#   # cluster_metric_exclude = [] ## Nothing excluded by default
#   # cluster_instances = false ## false by default
#
#   ## Datastores
#   # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
#   # datastore_metric_exclude = [] ## Nothing excluded by default
#   # datastore_instances = false ## false by default for Datastores only
#
#   ## Datacenters
#   datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
#   datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
#   # datacenter_instances = false ## false by default for Datacenters only
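#   ## For illustration, to actually collect datacenter metrics, clear the exclude list:
#   # datacenter_metric_include = []   ## empty list = collect all datacenter metrics
#   # datacenter_metric_exclude = []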
#
#   ## Plugin Settings
#   ## separator character to use for measurement and field names (default: "_")
#   # separator = "_"
#
#   ## number of objects to retrieve per query for realtime resources (vms and hosts)
#   ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
#   # max_query_objects = 256
#
#   ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
#   ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
#   # max_query_metrics = 256
#
#   ## number of goroutines to use for collection and discovery of objects and metrics
#   # collect_concurrency = 1
#   # discover_concurrency = 1
#
#   ## whether or not to force discovery of new objects on the initial gather call before collecting metrics
#   ## when true, large environments may see errors due to the time elapsed while collecting metrics
#   ## when false (default), the first collection cycle may return no or limited metrics while objects are discovered
#   # force_discover_on_init = false
#
#   ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
#   # object_discovery_interval = "300s"
#
#   ## timeout applies to any of the api requests made to vcenter
#   # timeout = "60s"
#
#   ## When set to true, all samples are sent as integers. This makes the output
#   ## data types backwards compatible with Telegraf 1.9 or lower. Normally all
#   ## samples from vCenter, with the exception of percentages, are integer
#   ## values, but under some conditions, some averaging takes place internally in
#   ## the plugin. Setting this flag to "false" will send values as floats to
#   ## preserve the full precision when averaging takes place.
#   # use_int_samples = true
#
#   ## Custom attributes from vCenter can be very useful for queries in order to slice the
#   ## metrics along different dimensions and for forming ad-hoc relationships. They are disabled
#   ## by default, since they can add a considerable amount of tags to the resulting metrics. To
#   ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
#   ## to select the attributes you want to include.
#   # custom_attribute_include = []
#   # custom_attribute_exclude = ["*"]
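#   ## For illustration (the attribute name "owner" is only an example), enabling a single
#   ## custom attribute as a tag could look like:
#   # custom_attribute_include = ["owner"]
#   # custom_attribute_exclude = []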
#
#   ## Optional SSL Config
#   # ssl_ca = "/path/to/cafile"
#   # ssl_cert = "/path/to/certfile"
#   # ssl_key = "/path/to/keyfile"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # A Webhooks Event collector
# [[inputs.webhooks]]
#   ## Address and port to host Webhook listener on
#   service_address = ":1619"
#
#   [inputs.webhooks.filestack]
#     path = "/filestack"
#
#   [inputs.webhooks.github]
#     path = "/github"
#     # secret = ""
#
#   [inputs.webhooks.mandrill]
#     path = "/mandrill"
#
#   [inputs.webhooks.rollbar]
#     path = "/rollbar"
#
#   [inputs.webhooks.papertrail]
#     path = "/papertrail"
#
#   [inputs.webhooks.particle]
#     path = "/particle"


# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
# [[inputs.zipkin]]
#   # path = "/api/v1/spans" # URL path for span data
#   # port = 9411            # Port on which Telegraf listens
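#
#   ## For illustration, with the defaults above Zipkin-instrumented services would send their
#   ## span data to http://<telegraf-host>:9411/api/v1/spans.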