# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
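#
# For example, assuming STR_VAR and INT_VAR are exported in the environment
# Telegraf is started from (illustrative names, not required variables):
#   [global_tags]
#     user = "$STR_VAR"              # string value, quoted
#   [agent]
#     metric_batch_size = $INT_VAR   # numeric value, unquoted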


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
  ## output, and will flush this buffer on a successful write. Oldest metrics
  ## are dropped first when this buffer fills.
  ## This buffer only fills when writes fail to output plugin(s).
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. Maximum flush_interval will be
  ## flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ##   ie, when interval = "10s", precision will be "1s"
  ##       when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration:
  ## Run telegraf with debug log messages.
  debug = false
  ## Run telegraf in quiet mode (error log messages only).
  quiet = false
  ## Specify the log file name. The empty string means to log to stderr.
  logfile = ""

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################

# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
  ## The full HTTP or UDP URL for your InfluxDB instance.
  ##
  ## Multiple URLs can be specified for a single cluster; only ONE of the
  ## URLs will be written to each interval.
  # urls = ["unix:///var/run/influxdb.sock"]
  # urls = ["udp://127.0.0.1:8089"]
  # urls = ["http://127.0.0.1:8086"]

  ## The target database for metrics; will be created as needed.
  # database = "telegraf"

  ## If true, no CREATE DATABASE queries will be sent.  Set to true when using
  ## Telegraf with a user without permissions to create databases or when the
  ## database already exists.
  # skip_database_creation = false

  ## Name of existing retention policy to write to.  Empty string writes to
  ## the default retention policy.  Only takes effect when using HTTP.
  # retention_policy = ""

  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
  ## Only takes effect when using HTTP.
  # write_consistency = "any"

  ## Timeout for HTTP messages.
  # timeout = "5s"

  ## HTTP Basic Auth
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"

  ## HTTP User-Agent
  # user_agent = "telegraf"

  ## UDP payload size is the maximum packet size to send.
  # udp_payload = "512B"

  ## Optional TLS Config for use on HTTP connections.
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## HTTP Proxy override. If unset, the standard proxy environment
  ## variables are consulted to determine which proxy, if any, should be used.
  # http_proxy = "http://corporate.proxy:3128"

  ## Additional HTTP headers
  # http_headers = {"X-Special-Header" = "Special-Value"}

  ## HTTP Content-Encoding for write request body, can be set to "gzip" to
  ## compress body or "identity" to apply no encoding.
  # content_encoding = "identity"

  ## When true, Telegraf will output unsigned integers as unsigned values,
  ## i.e.: "42u".  You will need a version of InfluxDB supporting unsigned
  ## integer values.  Enabling this option will result in field type errors if
  ## existing data has been written.
  # influx_uint_support = false


# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
#   ## Amon Server Key
#   server_key = "my-server-key" # required.
#
#   ## Amon Instance URL
#   amon_instance = "https://youramoninstance" # required
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Publishes metrics to an AMQP broker
# [[outputs.amqp]]
#   ## Broker to publish to.
#   ##   deprecated in 1.7; use the brokers option
#   # url = "amqp://localhost:5672/influxdb"
#
#   ## Brokers to publish to.  If multiple brokers are specified a random broker
#   ## will be selected anytime a connection is established.  This can be
#   ## helpful for load balancing when not using a dedicated load balancer.
#   brokers = ["amqp://localhost:5672/influxdb"]
#
#   ## Maximum messages to send over a connection.  Once this is reached, the
#   ## connection is closed and a new connection is made.  This can be helpful for
#   ## load balancing when not using a dedicated load balancer.
#   # max_messages = 0
#
#   ## Exchange to declare and publish to.
#   exchange = "telegraf"
#
#   ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
#   # exchange_type = "topic"
#
#   ## If true, exchange will be passively declared.
#   # exchange_declare_passive = false
#
#   ## Exchange durability can be either "transient" or "durable".
#   # exchange_durability = "durable"
#
#   ## Additional exchange arguments.
#   # exchange_arguments = { }
#   # exchange_arguments = {"hash_propery" = "timestamp"}
#
#   ## Authentication credentials for the PLAIN auth_method.
#   # username = ""
#   # password = ""
#
#   ## Auth method. PLAIN and EXTERNAL are supported
#   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
#   ## described here: https://www.rabbitmq.com/plugins.html
#   # auth_method = "PLAIN"
#
#   ## Metric tag to use as a routing key.
#   ##   ie, if this tag exists, its value will be used as the routing key
#   # routing_tag = "host"
#
#   ## Static routing key.  Used when no routing_tag is set or as a fallback
#   ## when the tag specified in routing tag is not found.
#   # routing_key = ""
#   # routing_key = "telegraf"
#
#   ## Delivery Mode controls if a published message is persistent.
#   ##   One of "transient" or "persistent".
#   # delivery_mode = "transient"
#
#   ## InfluxDB database added as a message header.
#   ##   deprecated in 1.7; use the headers option
#   # database = "telegraf"
#
#   ## InfluxDB retention policy added as a message header
#   ##   deprecated in 1.7; use the headers option
#   # retention_policy = "default"
#
#   ## Static headers added to each published message.
#   # headers = { }
#   # headers = {"database" = "telegraf", "retention_policy" = "default"}
#
#   ## Connection timeout.  If not provided, will default to 5s.  0s means no
#   ## timeout (not recommended).
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## If true, use batch serialization format instead of line-based delimiting.
#   ## Only applies to data formats which are not line based, such as JSON.
#   ## Recommended to set to true.
#   # use_batch_format = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"


# # Send metrics to Azure Application Insights
# [[outputs.application_insights]]
#   ## Instrumentation key of the Application Insights resource.
#   instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
#
#   ## Timeout for closing (default: 5s).
#   # timeout = "5s"
#
#   ## Enable additional diagnostic logging.
#   # enable_diagnostic_logging = false
#
#   ## Context Tag Sources add Application Insights context tags to a tag value.
#   ##
#   ## For list of allowed context tag keys see:
#   ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
#   # [outputs.application_insights.context_tag_sources]
#   #   "ai.cloud.role" = "kubernetes_container_name"
#   #   "ai.cloud.roleInstance" = "kubernetes_pod_name"


# # Send aggregate metrics to Azure Monitor
# [[outputs.azure_monitor]]
#   ## Timeout for HTTP writes.
#   # timeout = "20s"
#
#   ## Set the namespace prefix, defaults to "Telegraf/<input-name>".
#   # namespace_prefix = "Telegraf/"
#
#   ## Azure Monitor doesn't have a string value type, so convert string
#   ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
#   ## a maximum of 10 dimensions so Telegraf will only send the first 10
#   ## alphanumeric dimensions.
#   # strings_as_dimensions = false
#
#   ## Both region and resource_id must be set or be available via the
#   ## Instance Metadata service on Azure Virtual Machines.
#   #
#   ## Azure Region to publish metrics against.
#   ##   ex: region = "southcentralus"
#   # region = ""
#   #
#   ## The Azure Resource ID against which metrics will be logged.
#   ##   ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
#   # resource_id = ""


# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
#   ## Amazon REGION
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Endpoint to make request against, the correct endpoint is automatically
#   ## determined and this option should only be set if you wish to override the
#   ## default.
#   ##   ex: endpoint_url = "http://localhost:8000"
#   # endpoint_url = ""
#
#   ## Namespace for the CloudWatch MetricDatums
#   namespace = "InfluxData/Telegraf"
#
#   ## If you have a large number of metrics, consider sending statistic
#   ## values instead of raw metrics; this can improve performance and
#   ## reduce AWS API cost. If this flag is enabled, the plugin parses the
#   ## required CloudWatch statistic fields (count, min, max, and sum) and
#   ## sends them to CloudWatch. You can use the basicstats aggregator to
#   ## calculate those fields. If not all statistic fields are available,
#   ## all fields are still sent as raw metrics.
#   # write_statistics = false


# # Configuration for CrateDB to send metrics to.
# [[outputs.cratedb]]
#   # A github.com/jackc/pgx connection string.
#   # See https://godoc.org/github.com/jackc/pgx#ParseDSN
#   url = "postgres://user:password@localhost/schema?sslmode=disable"
#   # Timeout for all CrateDB queries.
#   timeout = "5s"
#   # Name of the table to store metrics in.
#   table = "metrics"
#   # If true, and the metrics table does not exist, create it automatically.
#   table_create = true


# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
#   ## Datadog API key
#   apikey = "my-secret-key" # required.
#
#   # The base endpoint URL can optionally be specified but it defaults to:
#   #url = "https://app.datadoghq.com/api/v1/series"
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Send metrics to nowhere at all
# [[outputs.discard]]
#   # no configuration


# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
#   ## The full HTTP endpoint URL for your Elasticsearch instance
#   ## Multiple URLs can be specified as part of the same cluster;
#   ## only ONE of the URLs will be written to each interval.
#   urls = [ "http://node1.es.example.com:9200" ] # required.
#   ## Elasticsearch client timeout, defaults to "5s" if not set.
#   timeout = "5s"
#   ## Set to true to ask Elasticsearch for a list of all cluster nodes,
#   ## so it is not necessary to list all nodes in the urls config option.
#   enable_sniffer = false
#   ## Set the interval to check if the Elasticsearch nodes are available
#   ## Setting to "0s" will disable the health check (not recommended in production)
#   health_check_interval = "10s"
#   ## HTTP basic authentication details (eg. when using Shield)
#   # username = "telegraf"
#   # password = "mypassword"
#
#   ## Index Config
#   ## The target index for metrics (Elasticsearch will create it if it does not exist).
#   ## You can use the date specifiers below to create indexes per time frame.
#   ## The metric timestamp will be used to decide the destination index name
#   # %Y - year (2016)
#   # %y - last two digits of year (00..99)
#   # %m - month (01..12)
#   # %d - day of month (e.g., 01)
#   # %H - hour (00..23)
#   # %V - week of the year (ISO week) (01..53)
#   ## Additionally, you can specify a tag name using the notation {{tag_name}}
#   ## which will be used as part of the index name. If the tag does not exist,
#   ## the default tag value will be used.
#   # index_name = "telegraf-{{host}}-%Y.%m.%d"
#   # default_tag_value = "none"
#   index_name = "telegraf-%Y.%m.%d" # required.
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Template Config
#   ## Set to true if you want telegraf to manage its index template.
#   ## If enabled it will create a recommended index template for telegraf indexes
#   manage_template = true
#   ## The template name used for telegraf indexes
#   template_name = "telegraf"
#   ## Set to true if you want telegraf to overwrite an existing template
#   overwrite_template = false


# # Send telegraf metrics to file(s)
# [[outputs.file]]
#   ## Files to write to, "stdout" is a specially handled file.
#   files = ["stdout", "/tmp/metrics.out"]
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
#   ## TCP endpoint for your graphite instance.
#   ## If multiple endpoints are configured, output will be load balanced.
#   ## Only one of the endpoints will be written to with each iteration.
#   servers = ["localhost:2003"]
#   ## Prefix metrics name
#   prefix = ""
#   ## Graphite output template
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   template = "host.tags.measurement.field"
#
#   ## Enable Graphite tags support
#   # graphite_tag_support = false
#
#   ## timeout in seconds for the write connection to graphite
#   timeout = 2
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
#   ## UDP endpoint for your graylog instance.
#   servers = ["127.0.0.1:12201", "192.168.1.1:12201"]


# # A plugin that can transmit metrics over HTTP
# [[outputs.http]]
#   ## URL is the address to send metrics to
#   url = "http://127.0.0.1:8080/metric"
#
#   ## Timeout for HTTP message
#   # timeout = "5s"
#
#   ## HTTP method, one of: "POST" or "PUT"
#   # method = "POST"
#
#   ## HTTP Basic Auth credentials
#   # username = "username"
#   # password = "pa$$word"
#
#   ## OAuth2 Client Credentials Grant
#   # client_id = "clientid"
#   # client_secret = "secret"
#   # token_url = "https://indentityprovider/oauth2/v1/token"
#   # scopes = ["urn:opc:idm:__myscopes__"]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"
#
#   ## Additional HTTP headers
#   # [outputs.http.headers]
#   #   # Should be set manually to "application/json" for json data_format
#   #   Content-Type = "text/plain; charset=utf-8"
#
#   ## HTTP Content-Encoding for write request body, can be set to "gzip" to
#   ## compress body or "identity" to apply no encoding.
#   # content_encoding = "identity"


# # Configuration for sending metrics to InfluxDB
# [[outputs.influxdb_v2]]
#   ## The URLs of the InfluxDB cluster nodes.
#   ##
#   ## Multiple URLs can be specified for a single cluster; only ONE of the
#   ## URLs will be written to each interval.
#   urls = ["http://127.0.0.1:9999"]
#
#   ## Token for authentication.
#   token = ""
#
#   ## Organization is the name of the organization you wish to write to; must exist.
#   organization = ""
#
#   ## Destination bucket to write into.
#   bucket = ""
#
#   ## Timeout for HTTP messages.
#   # timeout = "5s"
#
#   ## Additional HTTP headers
#   # http_headers = {"X-Special-Header" = "Special-Value"}
#
#   ## HTTP Proxy override. If unset, the standard proxy environment
#   ## variables are consulted to determine which proxy, if any, should be used.
#   # http_proxy = "http://corporate.proxy:3128"
#
#   ## HTTP User-Agent
#   # user_agent = "telegraf"
#
#   ## Content-Encoding for write request body, can be set to "gzip" to
#   ## compress body or "identity" to apply no encoding.
#   # content_encoding = "gzip"
#
#   ## Enable or disable uint support for writing uints to InfluxDB 2.0.
#   # influx_uint_support = false
#
#   ## Optional TLS Config for use on HTTP connections.
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
#   ## Project API Token (required)
#   api_token = "API Token" # required
#   ## Prefix the metrics with a given name
#   prefix = ""
#   ## Stats output template (Graphite formatting)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   template = "host.tags.measurement.field"
#   ## Timeout in seconds to connect
#   timeout = "2s"
#   ## Display communication to Instrumental
#   debug = false


# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
#   ## URLs of kafka brokers
#   brokers = ["localhost:9092"]
#   ## Kafka topic for producer messages
#   topic = "telegraf"
#
#   ## Optional Client id
#   # client_id = "Telegraf"
#
#   ## Set the minimal supported Kafka version.  Setting this enables the use of new
#   ## Kafka features and APIs.  Of particular interest, lz4 compression
#   ## requires at least version 0.10.0.0.
#   ##   ex: version = "1.1.0"
#   # version = ""
#
#   ## Optional topic suffix configuration.
#   ## If the section is omitted, no suffix is used.
#   ## The following topic suffix methods are supported:
#   ##   measurement - suffix is separator + measurement name
#   ##   tags        - suffix is separator + specified tag values
#   ##                 interleaved with separator
#
#   ## Suffix equals to "_" + measurement name
#   # [outputs.kafka.topic_suffix]
#   #   method = "measurement"
#   #   separator = "_"
#
#   ## Suffix equals to "__" + measurement's "foo" tag value.
#   ##   If there's no such a tag, suffix equals to an empty string
#   # [outputs.kafka.topic_suffix]
#   #   method = "tags"
#   #   keys = ["foo"]
#   #   separator = "__"
#
#   ## Suffix equals to "_" + measurement's "foo" and "bar"
#   ##   tag values, separated by "_". If there is no such tags,
#   ##   their values treated as empty strings.
#   # [outputs.kafka.topic_suffix]
#   #   method = "tags"
#   #   keys = ["foo", "bar"]
#   #   separator = "_"
#
#   ## Telegraf tag to use as a routing key
#   ##  ie, if this tag exists, its value will be used as the routing key
#   routing_tag = "host"
#
#   ## Static routing key.  Used when no routing_tag is set or as a fallback
#   ## when the tag specified in routing tag is not found.  If set to "random",
#   ## a random value will be generated for each message.
#   ##   ex: routing_key = "random"
#   ##       routing_key = "telegraf"
#   # routing_key = ""
#
#   ## CompressionCodec represents the various compression codecs recognized by
#   ## Kafka in messages.
#   ##  0 : No compression
#   ##  1 : Gzip compression
#   ##  2 : Snappy compression
#   ##  3 : LZ4 compression
#   # compression_codec = 0
#
#   ##  RequiredAcks is used in Produce Requests to tell the broker how many
#   ##  replica acknowledgements it must see before responding
#   ##   0 : the producer never waits for an acknowledgement from the broker.
#   ##       This option provides the lowest latency but the weakest durability
#   ##       guarantees (some data will be lost when a server fails).
#   ##   1 : the producer gets an acknowledgement after the leader replica has
#   ##       received the data. This option provides better durability as the
#   ##       client waits until the server acknowledges the request as successful
#   ##       (only messages that were written to the now-dead leader but not yet
#   ##       replicated will be lost).
#   ##   -1: the producer gets an acknowledgement after all in-sync replicas have
#   ##       received the data. This option provides the best durability, we
#   ##       guarantee that no messages will be lost as long as at least one in
#   ##       sync replica remains.
#   # required_acks = -1
#
#   ## The maximum number of times to retry sending a metric before failing
#   ## until the next flush.
#   # max_retry = 3
#
#   ## The maximum permitted size of a message. Should be set equal to or
#   ## smaller than the broker's 'message.max.bytes'.
#   # max_message_bytes = 1000000
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional SASL Config
#   # sasl_username = "kafka"
#   # sasl_password = "secret"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"


# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
#   ## Amazon REGION of kinesis endpoint.
#   region = "ap-southeast-2"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Endpoint to make request against, the correct endpoint is automatically
#   ## determined and this option should only be set if you wish to override the
#   ## default.
#   ##   ex: endpoint_url = "http://localhost:8000"
#   # endpoint_url = ""
#
#   ## Kinesis StreamName must exist prior to starting telegraf.
#   streamname = "StreamName"
#   ## DEPRECATED: PartitionKey used for sharding data.
#   partitionkey = "PartitionKey"
#   ## DEPRECATED: If set, the partition key will be a random UUID on every put.
#   ## This allows for scaling across multiple shards in a stream, but
#   ## will cause issues with ordering.
#   use_random_partitionkey = false
#   ## The partition key can be calculated using one of several methods:
#   ##
#   ## Use a static value for all writes:
#   #  [outputs.kinesis.partition]
#   #    method = "static"
#   #    key = "howdy"
#   #
#   ## Use a random partition key on each write:
#   #  [outputs.kinesis.partition]
#   #    method = "random"
#   #
#   ## Use the measurement name as the partition key:
#   #  [outputs.kinesis.partition]
#   #    method = "measurement"
#   #
#   ## Use the value of a tag for all writes; if the tag is not set, the
#   ## 'default' value is used. If no default is set, it defaults to "telegraf"
#   #  [outputs.kinesis.partition]
#   #    method = "tag"
#   #    key = "host"
#   #    default = "mykey"
#
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"
#
#   ## debug will show upstream aws messages.
#   debug = false


# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
#   ## Librato API Docs
#   ## http://dev.librato.com/v1/metrics-authentication
#   ## Librato API user
#   api_user = "telegraf@influxdb.com" # required.
#   ## Librato API token
#   api_token = "my-secret-token" # required.
#   ## Debug
#   # debug = false
#   ## Connection timeout.
#   # timeout = "5s"
#   ## Output source Template (same as graphite buckets)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   ## This template is used for Librato's source (not the metric name)
#   template = "host"
#


# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
#   servers = ["localhost:1883"] # required.
#
#   ## MQTT outputs send metrics to this topic format
#   ##    "<topic_prefix>/<hostname>/<pluginname>/"
#   ##   ex: prefix/web01.example.com/mem
#   topic_prefix = "telegraf"
#
#   ## QoS policy for messages
#   ##   0 = at most once
#   ##   1 = at least once
#   ##   2 = exactly once
#   # qos = 2
#
#   ## Username and password to connect to the MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## client ID, if not set a random ID is generated
#   # client_id = ""
#
#   ## Timeout for write operations. default: 5s
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## When true, metrics will be sent in one MQTT message per flush.  Otherwise,
#   ## metrics are written one metric per MQTT message.
#   # batch = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NATS
# [[outputs.nats]]
#   ## URLs of NATS servers
#   servers = ["nats://localhost:4222"]
#   ## Optional credentials
#   # username = ""
#   # password = ""
#   ## NATS subject for producer messages
#   subject = "telegraf"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
#   ## Location of nsqd instance listening on TCP
#   server = "localhost:4150"
#   ## NSQ topic for producer messages
#   topic = "telegraf"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
#   ## prefix for metrics keys
#   prefix = "my.specific.prefix."
#
#   ## DNS name of the OpenTSDB server
#   ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
#   ## telnet API. "http://opentsdb.example.com" will use the Http API.
#   host = "opentsdb.example.com"
#
#   ## Port of the OpenTSDB server
#   port = 4242
#
#   ## Number of data points to send to OpenTSDB in HTTP requests.
#   ## Not used with telnet API.
#   http_batch_size = 50
#
#   ## URI path for HTTP requests to OpenTSDB.
#   ## Used in cases where OpenTSDB is located behind a reverse proxy.
#   http_path = "/api/put"
#
#   ## Debug true - Prints OpenTSDB communication
#   debug = false
#
#   ## Separator separates measurement name from field
#   separator = "_"


# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
#   ## Address to listen on
#   listen = ":9273"
#
#   ## Use HTTP Basic Authentication.
#   # basic_username = "Foo"
#   # basic_password = "Bar"
#
#   ## If set, the IP Ranges which are allowed to access metrics.
#   ##   ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
#   # ip_range = []
#
#   ## Path to publish the metrics on.
#   # path = "/metrics"
#
#   ## Expiration interval for each metric. 0 == no expiration
#   # expiration_interval = "60s"
#
#   ## Collectors to exclude, valid entries are "gocollector" and "process".
#   ## If unset, both collectors are enabled.
#   # collectors_exclude = ["gocollector", "process"]
#
#   ## Send string metrics as Prometheus labels.
#   ## Unless set to false all string metrics will be sent as labels.
#   # string_as_label = true
#
#   ## If set, enable TLS with the given certificate.
#   # tls_cert = "/etc/ssl/telegraf.crt"
#   # tls_key = "/etc/ssl/telegraf.key"


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
#   ## The full TCP or UDP URL of the Riemann server
#   url = "tcp://localhost:5555"
#
#   ## Riemann event TTL, floating-point time in seconds.
#   ## Defines how long an event is considered valid in Riemann
#   # ttl = 30.0
#
#   ## Separator to use between measurement and field name in Riemann service name
#   ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
#   separator = "/"
#
#   ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
#   # measurement_as_attribute = false
#
#   ## Send string metrics as Riemann event states.
#   ## Unless enabled, all string metrics will be ignored
#   # string_as_state = false
#
#   ## A list of tag keys whose values get sent as Riemann tags.
#   ## If empty, all Telegraf tag values will be sent as tags
#   # tag_keys = ["telegraf","custom_tag"]
#
#   ## Additional Riemann tags to send.
#   # tags = ["telegraf-output"]
#
#   ## Description for Riemann event
#   # description_text = "metrics collected from telegraf"
#
#   ## Riemann client write timeout, defaults to "5s" if not set.
#   # timeout = "5s"


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann_legacy]]
#   ## URL of server
#   url = "localhost:5555"
#   ## Transport protocol to use, either tcp or udp
#   transport = "tcp"
#   ## separator to use between input name and field name in Riemann service name
#   separator = " "


# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
#   ## URL to connect to
#   # address = "tcp://127.0.0.1:8094"
#   # address = "tcp://example.com:http"
#   # address = "tcp4://127.0.0.1:8094"
#   # address = "tcp6://127.0.0.1:8094"
#   # address = "tcp6://[2001:db8::1]:8094"
#   # address = "udp://127.0.0.1:8094"
#   # address = "udp4://127.0.0.1:8094"
#   # address = "udp6://127.0.0.1:8094"
#   # address = "unix:///tmp/telegraf.sock"
#   # address = "unixgram:///tmp/telegraf.sock"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## Data format to generate.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"


# # Configuration for Google Cloud Stackdriver to send metrics to
# [[outputs.stackdriver]]
#   # GCP Project
#   project = "erudite-bloom-151019"
#
#   # The namespace for the metric descriptor
#   namespace = "telegraf"


# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
#   ## DNS name of the wavefront proxy server
#   host = "wavefront.example.com"
#
#   ## Port that the Wavefront proxy server listens on
#   port = 2878
#
#   ## prefix for metrics keys
#   #prefix = "my.specific.prefix."
#
#   ## whether to use "value" for name of simple fields
#   #simple_fields = false
#
#   ## character to use between metric and field name.  defaults to . (dot)
#   #metric_separator = "."
#
#   ## Convert metric name paths to use the metric_separator character.
#   ## When true (default), all _ (underscore) characters in the final
#   ## metric name are converted.
#   #convert_paths = true
#
#   ## Use Regex to sanitize metric and tag names from invalid characters
#   ## Regex is more thorough, but significantly slower
#   #use_regex = false
#
#   ## point tags to use as the source name for Wavefront (if none found, host will be used)
#   #source_override = ["hostname", "agent_host", "node_host"]
#
#   ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0.  default true
#   #convert_bool = true
#
#   ## Define a mapping, namespaced by metric prefix, from string values to numeric values
#   ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for
#   ## any metrics beginning with "elasticsearch"
#   #[[outputs.wavefront.string_to_number.elasticsearch]]
#   #  green = 1.0
#   #  yellow = 0.5
#   #  red = 0.0



###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################

# # Convert values to another metric value type
# [[processors.converter]]
#   ## Tags to convert
#   ##
#   ## The table key determines the target type, and the array of key-values
#   ## select the keys to convert.  The array may contain globs.
#   ##   <target-type> = [<tag-key>...]
#   [processors.converter.tags]
#     string = []
#     integer = []
#     unsigned = []
#     boolean = []
#     float = []
#
#   ## Fields to convert
#   ##
#   ## The table key determines the target type, and the array of key-values
#   ## select the keys to convert.  The array may contain globs.
#   ##   <target-type> = [<field-key>...]
#   [processors.converter.fields]
#     tag = []
#     string = []
#     integer = []
#     unsigned = []
#     boolean = []
#     float = []
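#
#   ## For example (illustrative key names, assuming incoming metrics carry
#   ## a "port" tag and "scoreboard_*" string fields), the tables above
#   ## could be filled in as:
#   #   [processors.converter.tags]
#   #     integer = ["port"]
#   #   [processors.converter.fields]
#   #     float = ["scoreboard_*"]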


# # Map enum values according to given table.
# [[processors.enum]]
#   [[processors.enum.mapping]]
#     ## Name of the field to map
#     field = "status"
#
#     ## Destination field to be used for the mapped value.  By default the source
#     ## field is used, overwriting the original value.
#     # dest = "status_code"
#
#     ## Default value to be used for all values not contained in the mapping
#     ## table.  When unset, the unmodified value for the field will be used if no
#     ## match is found.
#     # default = 0
#
#     ## Table of mappings
#     [processors.enum.mapping.value_mappings]
#       green = 1
#       yellow = 2
#       red = 3


# # Apply metric modifications using override semantics.
# [[processors.override]]
#   ## All modifications on inputs and aggregators can be overridden:
#   # name_override = "new_name"
#   # name_prefix = "new_name_prefix"
#   # name_suffix = "new_name_suffix"
#
#   ## Tags to be added (all values must be strings)
#   # [processors.override.tags]
#   #   additional_tag = "tag_value"


# # Parse a value in a specified field/tag(s) and add the result in a new metric
# [[processors.parser]]
#   ## The names of the fields whose values will be parsed.
#   parse_fields = []
#
#   ## If true, incoming metrics are not emitted.
#   drop_original = false
#
#   ## If set to override, emitted metrics will be merged by overriding the
#   ## original metric using the newly parsed metrics.
#   merge = "override"
#
#   ## The data format to be read from the fields
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
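#
#   ## For example, assuming incoming metrics carry a "message" field whose
#   ## value is itself influx line protocol (hypothetical field name):
#   # parse_fields = ["message"]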


# # Print all metrics that pass through this filter.
# [[processors.printer]]


# # Transforms tag and field values with regex pattern
# [[processors.regex]]
#   ## Tag and field conversions are defined in separate sub-tables
#   # [[processors.regex.tags]]
#   #   ## Tag to change
#   #   key = "resp_code"
#   #   ## Regular expression to match on a tag value
#   #   pattern = "^(\\d)\\d\\d$"
#   #   ## Pattern for constructing a new value (${1} represents first subgroup)
#   #   replacement = "${1}xx"
#
#   # [[processors.regex.fields]]
#   #   key = "request"
#   #   ## All the power of the Go regular expressions available here
#   #   ## For example, named subgroups
#   #   pattern = "^/api(?P<method>/[\\w/]+)\\S*"
#   #   replacement = "${method}"
#   #   ## If result_key is present, a new field will be created
#   #   ## instead of changing existing field
#   #   result_key = "method"
#
#   ## Multiple conversions may be applied for one field sequentially
#   ## Let's extract one more value
#   # [[processors.regex.fields]]
#   #   key = "request"
#   #   pattern = ".*category=(\\w+).*"
#   #   replacement = "${1}"
#   #   result_key = "search_category"


# # Rename measurements, tags, and fields that pass through this filter.
# [[processors.rename]]
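#   ## A minimal sketch using the replace sub-table (names are illustrative;
#   ## see the plugin README for the exact options in your version):
#   # [[processors.rename.replace]]
#   #   measurement = "network_interface_throughput"
#   #   dest = "throughput"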


# # Perform string processing on tags, fields, and measurements
# [[processors.strings]]
#   ## Convert a tag value to uppercase
#   # [[processors.strings.uppercase]]
#   #   tag = "method"
#
#   ## Convert a field value to lowercase and store in a new field
#   # [[processors.strings.lowercase]]
#   #   field = "uri_stem"
#   #   dest = "uri_stem_normalised"
#
#   ## Trim leading and trailing whitespace using the default cutset
#   # [[processors.strings.trim]]
#   #   field = "message"
#
#   ## Trim leading characters in cutset
#   # [[processors.strings.trim_left]]
#   #   field = "message"
#   #   cutset = "\t"
#
#   ## Trim trailing characters in cutset
#   # [[processors.strings.trim_right]]
#   #   field = "message"
#   #   cutset = "\r\n"
#
#   ## Trim the given prefix from the field
#   # [[processors.strings.trim_prefix]]
#   #   field = "my_value"
#   #   prefix = "my_"
#
#   ## Trim the given suffix from the field
#   # [[processors.strings.trim_suffix]]
#   #   field = "read_count"
#   #   suffix = "_count"
#
#   ## Replace all non-overlapping instances of old with new
#   # [[processors.strings.replace]]
#   #   measurement = "*"
#   #   old = ":"
#   #   new = "_"


# # Pass through only the top k metrics, based on a field aggregation.
# [[processors.topk]]
#   ## How many seconds between aggregations
#   # period = 10
#
#   ## How many top metrics to return
#   # k = 10
#
#   ## Over which tags the aggregation should be done. Globs can be specified, in
#   ## which case any tag matching the glob will be aggregated over. If set to an
#   ## empty list, no aggregation over tags is done.
#   # group_by = ['*']
#
#   ## Over which fields the top k are calculated
#   # fields = ["value"]
#
#   ## What aggregation to use. Options: sum, mean, min, max
#   # aggregation = "mean"
#
#   ## Instead of the top k largest metrics, return the bottom k lowest metrics
#   # bottomk = false
#
#   ## The plugin assigns each metric a GroupBy tag generated from its name and
#   ## tags. If this setting is different from "", the plugin will add a
#   ## tag (whose name will be the value of this setting) to each metric with
#   ## the value of the calculated GroupBy tag. Useful for debugging.
#   # add_groupby_tag = ""
#
#   ## These settings provide a way to know the position of each metric in
#   ## the top k. The 'add_rank_fields' setting lets you specify the fields
#   ## for which the position is required. If the list is non-empty, a field
#   ## will be added to each and every metric for each string present in this
#   ## setting. This field will contain the ranking of the group that
#   ## the metric belonged to when aggregated over that field.
#   ## The name of the field will be the name of the aggregation field,
#   ## suffixed with the string '_topk_rank'.
#   # add_rank_fields = []
#
#   ## These settings provide a way to know what values the plugin is generating
#   ## when aggregating metrics. The 'add_aggregate_fields' setting lets you
#   ## specify the fields for which the final aggregation value is required. If
#   ## the list is non-empty, a field will be added to each and every metric for
#   ## each field present in this setting. This field will contain
#   ## the computed aggregation for the group that the metric belonged to when
#   ## aggregated over that field.
#   ## The name of the field will be the name of the aggregation field,
#   ## suffixed with the string '_topk_aggregate'.
#   # add_aggregate_fields = []
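#
#   ## For example, ranking over the "value" field (field name illustrative)
#   ## would add "value_topk_rank" and "value_topk_aggregate" fields to each
#   ## passing metric:
#   # add_rank_fields = ["value"]
#   # add_aggregate_fields = ["value"]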



###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################

# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false


# # Create aggregate histograms.
# [[aggregators.histogram]]
#   ## The period in which to flush the aggregator.
#   period = "30s"
#
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## Example config that aggregates all fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
#   #   ## The name of the metric.
#   #   measurement_name = "cpu"
#
#   ## Example config that aggregates only specific fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
#   #   ## The name of the metric.
#   #   measurement_name = "diskio"
#   #   ## The specific fields of the metric
#   #   fields = ["io_time", "read_time", "write_time"]


# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false


# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#   ## The fields for which the values will be counted
#   fields = []
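#   ## For example, to count the occurrences of each distinct HTTP status
#   ## code (assuming a "status" field exists on the incoming metrics):
#   # fields = ["status"]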



###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################

# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## If true, collect raw CPU time metrics.
  collect_cpu_time = false
  ## If true, compute and report the sum of all non-idle CPU states.
  report_active = false


# Read metrics about disk usage by mount point
[[inputs.disk]]
  ## By default stats will be gathered for all mount points.
  ## Setting mount_points will restrict the stats to only the specified mount points.
  # mount_points = ["/"]

  ## Ignore mount points by filesystem type.
  ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]


# Read metrics about disk IO by device
[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  # devices = ["sda", "sdb", "vd*"]
  ## Uncomment the following line if you need disk serial numbers.
  # skip_serial_number = false
  #
  ## On systems which support it, device metadata can be added in the form of
  ## tags.
  ## Currently only Linux is supported via udev properties. You can view
  ## available properties for a device by running:
  ## 'udevadm info -q property -n /dev/sda'
  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  #
  ## Using the same metadata source as device_tags, you can also customize the
  ## name of the device via templates.
  ## The 'name_templates' parameter is a list of templates to try and apply to
  ## the device. The template may contain variables in the form of '$PROPERTY' or
  ## '${PROPERTY}'. The first template which does not contain any variables not
  ## present for the device is used as the device name tag.
  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
  ## the near-meaningless DM-0 name.
  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]


# Get kernel statistics from /proc/stat
[[inputs.kernel]]
  # no configuration


# Read metrics about memory usage
[[inputs.mem]]
  # no configuration


# Get the number of processes and group them by status
[[inputs.processes]]
  # no configuration


# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration


# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration


# # Gather ActiveMQ metrics
# [[inputs.activemq]]
#   ## Required ActiveMQ Endpoint
#   # server = "192.168.50.10"
#
#   ## Required ActiveMQ port
#   # port = 8161
#
#   ## Credentials for basic HTTP authentication
#   # username = "admin"
#   # password = "admin"
#
#   ## Required ActiveMQ webadmin root path
#   # webadmin = "admin"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read stats from aerospike server(s)
# [[inputs.aerospike]]
#   ## Aerospike servers to connect to (with port)
#   ## This plugin will query all namespaces the aerospike
#   ## server has configured and get stats for them.
#   servers = ["localhost:3000"]
#
#   # username = "telegraf"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # enable_tls = false
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## If true, skip chain & host verification
#   # insecure_skip_verify = true


# # Read Apache status information (mod_status)
# [[inputs.apache]]
#   ## An array of URLs to gather from, must be directed at the machine
#   ## readable version of the mod_status page including the auto query string.
#   ## Default is "http://localhost/server-status?auto".
#   urls = ["http://localhost/server-status?auto"]
#
#   ## Credentials for basic HTTP authentication.
#   # username = "myuser"
#   # password = "mypassword"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Gather metrics from Apache Aurora schedulers
# [[inputs.aurora]]
#   ## Schedulers are the base addresses of your Aurora Schedulers
#   schedulers = ["http://127.0.0.1:8081"]
#
#   ## Set of role types to collect metrics from.
#   ##
#   ## The scheduler roles are checked each interval by contacting the
#   ## scheduler nodes; zookeeper is not contacted.
#   # roles = ["leader", "follower"]
#
#   ## Timeout is the max time for total network operations.
#   # timeout = "5s"
#
#   ## Username and password are sent using HTTP Basic Auth.
#   # username = "username"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
#   ## Bcache sets path
#   ## If not specified, then default is:
#   bcachePath = "/sys/fs/bcache"
#
#   ## By default, telegraf gathers stats for all bcache devices
#   ## Setting devices will restrict the stats to the specified
#   ## bcache devices.
#   bcacheDevs = ["bcache0"]


# # Collects Beanstalkd server and tubes stats
# [[inputs.beanstalkd]]
#   ## Server to collect data from
#   server = "localhost:11300"
#
#   ## List of tubes to gather stats about.
#   ## If no tubes are specified, data is gathered for each tube on the server, as reported by the list-tubes command
#   tubes = ["notifications"]


# # Collect bond interface status, slaves statuses and failures count
# [[inputs.bond]]
#   ## Sets 'proc' directory path
#   ## If not specified, then default is /proc
#   # host_proc = "/proc"
#
#   ## By default, telegraf gathers stats for all bond interfaces
#   ## Setting interfaces will restrict the stats to the specified
#   ## bond interfaces.
#   # bond_interfaces = ["bond0"]


# # Collect Kafka topics and consumers status from Burrow HTTP API.
# [[inputs.burrow]]
#   ## Burrow API endpoints in format "scheme://host:port".
#   ## Default is "http://localhost:8000".
#   servers = ["http://localhost:8000"]
#
#   ## Override Burrow API prefix.
#   ## Useful when Burrow is behind reverse-proxy.
#   # api_prefix = "/v3/kafka"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Limit per-server concurrent connections.
#   ## Useful in case of large number of topics or consumer groups.
#   # concurrent_connections = 20
#
#   ## Filter clusters, default is no filtering.
#   ## Values can be specified as glob patterns.
#   # clusters_include = []
#   # clusters_exclude = []
#
#   ## Filter consumer groups, default is no filtering.
#   ## Values can be specified as glob patterns.
#   # groups_include = []
#   # groups_exclude = []
#
#   ## Filter topics, default is no filtering.
#   ## Values can be specified as glob patterns.
#   # topics_include = []
#   # topics_exclude = []
#
#   ## Credentials for basic HTTP authentication.
#   # username = ""
#   # password = ""
#
#   ## Optional SSL config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   # insecure_skip_verify = false


# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
#   ## This is the recommended interval to poll.  Too frequent and you will lose
#   ## data points due to timeouts during rebalancing and recovery
#   interval = '1m'
#
#   ## All configuration values are optional, defaults are shown below
#
#   ## location of ceph binary
#   ceph_binary = "/usr/bin/ceph"
#
#   ## directory in which to look for socket files
#   socket_dir = "/var/run/ceph"
#
#   ## prefix of MON and OSD socket files, used to determine socket type
#   mon_prefix = "ceph-mon"
#   osd_prefix = "ceph-osd"
#
#   ## suffix used to identify socket files
#   socket_suffix = "asok"
#
#   ## Ceph user to authenticate as
#   ceph_user = "client.admin"
#
#   ## Ceph configuration to use to locate the cluster
#   ceph_config = "/etc/ceph/ceph.conf"
#
#   ## Whether to gather statistics via the admin socket
#   gather_admin_socket_stats = true
#
#   ## Whether to gather statistics via ceph commands
#   gather_cluster_stats = false


# # Read specific statistics per cgroup
# [[inputs.cgroup]]
#   ## Directories in which to look for files, globs are supported.
#   ## Consider restricting paths to the set of cgroups you really
#   ## want to monitor if you have a large number of cgroups, to avoid
#   ## any cardinality issues.
#   # paths = [
#   #   "/cgroup/memory",
#   #   "/cgroup/memory/child1",
#   #   "/cgroup/memory/child2/*",
#   # ]
#   ## cgroup stat fields, as file names, globs are supported.
#   ## these file names are appended to each path from above.
#   # files = ["memory.*usage*", "memory.limit_in_bytes"]


# # Get standard chrony metrics, requires chronyc executable.
# [[inputs.chrony]]
#   ## If true, chronyc tries to perform a DNS lookup for the time server.
#   # dns_lookup = false


# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
#   ## Amazon Region
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Endpoint to make request against, the correct endpoint is automatically
#   ## determined and this option should only be set if you wish to override the
#   ## default.
#   ##   ex: endpoint_url = "http://localhost:8000"
#   # endpoint_url = ""
#
#   ## The minimum period for Cloudwatch metrics is 1 minute (60s). However, not all
#   ## metrics are made available at the 1 minute period. Some are collected at
#   ## 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
#   ## Note that if a period is configured that is smaller than the minimum for a
#   ## particular metric, that metric will not be returned by the Cloudwatch API
#   ## and will not be collected by Telegraf.
#   #
#   ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
#   period = "5m"
#
#   ## Collection Delay (required - must account for metrics availability via CloudWatch API)
#   delay = "5m"
#
#   ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
#   ## gaps or overlap in pulled data
#   interval = "5m"
#
#   ## Configure the TTL for the internal cache of metrics.
#   ## Defaults to 1 hr if not specified
#   #cache_ttl = "10m"
#
#   ## Metric Statistic Namespace (required)
#   namespace = "AWS/ELB"
#
#   ## Maximum requests per second. Note that the global default AWS rate limit is
#   ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
#   ## maximum of 400. Optional - default value is 200.
#   ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
#   ratelimit = 200
#
#   ## Metrics to Pull (optional)
#   ## Defaults to all Metrics in Namespace if nothing is provided
#   ## Refreshes Namespace available metrics every 1h
#   #[[inputs.cloudwatch.metrics]]
#   #  names = ["Latency", "RequestCount"]
#   #
#   #  ## Dimension filters for Metric.  These are optional; however, all dimensions
#   #  ## defined for the metric names must be specified in order to retrieve
#   #  ## the metric statistics.
#   #  [[inputs.cloudwatch.metrics.dimensions]]
#   #    name = "LoadBalancerName"
#   #    value = "p-example"


# # Collects conntrack stats from the configured directories and files.
# [[inputs.conntrack]]
#    ## The following defaults would work with multiple versions of conntrack.
#    ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
#    ## kernel versions, as are the directory locations.
#
#    ## Superset of filenames to look for within the conntrack dirs.
#    ## Missing files will be ignored.
#    files = ["ip_conntrack_count","ip_conntrack_max",
#             "nf_conntrack_count","nf_conntrack_max"]
#
#    ## Directories to search within for the conntrack files above.
#    ## Missing directories will be ignored.
#    dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]


# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
#   ## Consul server address
#   # address = "localhost"
#
#   ## URI scheme for the Consul server, one of "http", "https"
#   # scheme = "http"
#
#   ## ACL token used in every request
#   # token = ""
#
#   ## HTTP Basic Authentication username and password.
#   # username = ""
#   # password = ""
#
#   ## Data centre to query the health checks from
#   # datacentre = ""
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = true
#
#   ## Consul checks' tag splitting
#   ## When tags are formatted like "key:value" with ":" as the delimiter,
#   ## they will be split and reported as proper key:value pairs in Telegraf
#   # tag_delimiter = ":"
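#   ## e.g. with tag_delimiter = ":", a check tag "env:prod" (hypothetical
#   ## value) would be reported as the tag env="prod".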


# # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]]
#   ## specify servers via a url matching:
#   ##  [protocol://][:password]@address[:port]
#   ##  e.g.
#   ##    http://couchbase-0.example.com/
#   ##    http://admin:secret@couchbase-0.example.com:8091/
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no protocol is specified, HTTP is used.
#   ## If no port is specified, 8091 is used.
#   servers = ["http://localhost:8091"]


# # Read CouchDB Stats from one or more servers
# [[inputs.couchdb]]
#   ## Works with CouchDB stats endpoints out of the box
#   ## Multiple Hosts from which to read CouchDB stats:
#   hosts = ["http://localhost:8086/_stats"]


# # Input plugin for DC/OS metrics
# [[inputs.dcos]]
#   ## The DC/OS cluster URL.
#   cluster_url = "https://dcos-ee-master-1"
#
#   ## The ID of the service account.
#   service_account_id = "telegraf"
#   ## The private key file for the service account.
#   service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
#
#   ## Path containing login token.  If set, will read on every gather.
#   # token_file = "/home/dcos/.dcos/token"
#
#   ## In all filter options if both include and exclude are empty all items
#   ## will be collected.  Arrays may contain glob patterns.
#   ##
#   ## Node IDs to collect metrics from.  If a node is excluded, no metrics will
#   ## be collected for its containers or apps.
#   # node_include = []
#   # node_exclude = []
#   ## Container IDs to collect container metrics from.
#   # container_include = []
#   # container_exclude = []
#   ## Container IDs to collect app metrics from.
#   # app_include = []
#   # app_exclude = []
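#
#   ## Example (hypothetical node IDs): gather only private agent nodes:
#   # node_include = ["dcos-agent-private-*"]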
#
#   ## Maximum concurrent connections to the cluster.
#   # max_connections = 10
#   ## Maximum time to receive a response from cluster.
#   # response_timeout = "20s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## If false, skip chain & host verification
#   # insecure_skip_verify = true
#
#   ## Recommended filtering to reduce series cardinality.
#   # [inputs.dcos.tagdrop]
#   #   path = ["/var/lib/mesos/slave/slaves/*"]


# # Read metrics from one or many disque servers
# [[inputs.disque]]
#   ## An array of URIs to gather stats about. Specify an ip or hostname
#   ## with optional port and password.
#   ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost"]


# # Provides native collection of dmsetup-based statistics for dm-cache
# [[inputs.dmcache]]
#   ## Whether to report per-device stats or not
#   per_device = true


# # Query the given DNS server(s) and gather statistics
# [[inputs.dns_query]]
#   ## servers to query
#   servers = ["8.8.8.8"]
#
#   ## Network is the network protocol name.
#   # network = "udp"
#
#   ## Domains or subdomains to query.
#   # domains = ["."]
#
#   ## Query record type.
#   ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
#   # record_type = "A"
#
#   ## DNS server port.
#   # port = 53
#
#   ## Query timeout in seconds.
#   # timeout = 2


# # Read metrics about docker containers
# [[inputs.docker]]
#   ## Docker Endpoint
#   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
#   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
#   endpoint = "unix:///var/run/docker.sock"
#
#   ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
#   gather_services = false
#
#   ## Only collect metrics for these containers, collect all if empty
#   container_names = []
#
#   ## Containers to include and exclude. Globs accepted.
#   ## Note that an empty array for both will include all containers
#   container_name_include = []
#   container_name_exclude = []
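#
#   ## Example (hypothetical names): only containers from one compose project,
#   ## minus its database container:
#   # container_name_include = ["myproject_*"]
#   # container_name_exclude = ["myproject_db_*"]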
#
#   ## Container states to include and exclude. Globs accepted.
#   ## When empty only containers in the "running" state will be captured.
#   # container_state_include = []
#   # container_state_exclude = []
#
#   ## Timeout for docker list, info, and stats commands
#   timeout = "5s"
#
#   ## Whether to report for each container per-device blkio (8:0, 8:1...) and
#   ## network (eth0, eth1, ...) stats or not
#   perdevice = true
#   ## Whether to report for each container total blkio and network stats or not
#   total = false
#   ## Which environment variables should we use as a tag
#   # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
#   ## docker labels to include and exclude as tags.  Globs accepted.
#   ## Note that an empty array for both will include all labels as tags
#   docker_label_include = []
#   docker_label_exclude = []
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read statistics from one or many dovecot servers
# [[inputs.dovecot]]
#   ## specify dovecot servers via an address:port list
#   ##  e.g.
#   ##    localhost:24242
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost:24242"]
#   ## Type is one of "user", "domain", "ip", or "global"
#   type = "global"
#   ## Wildcard matches like "*.com". An empty string "" is the same as "*"
#   ## If type = "ip" filters should be <IP/network>
#   filters = [""]


# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
#   ## specify a list of one or more Elasticsearch servers
#   ## You can add username and password to your url to use basic authentication:
#   # servers = ["http://user:pass@localhost:9200"]
#   servers = ["http://localhost:9200"]
#
#   ## Timeout for HTTP requests to the elastic search server(s)
#   http_timeout = "5s"
#
#   ## When local is true (the default), the node will read only its own stats.
#   ## Set local to false when you want to read the node stats from all nodes
#   ## of the cluster.
#   local = true
#
#   ## Set cluster_health to true when you want to also obtain cluster health stats
#   cluster_health = false
#
#   ## Adjust cluster_health_level when you want to also obtain detailed health stats
#   ## The options are
#   ##  - indices (default)
#   ##  - cluster
#   # cluster_health_level = "indices"
#
#   ## Set cluster_stats to true when you want to also obtain cluster stats from the
#   ## Master node.
#   cluster_stats = false
#
#   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
#   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
#   ## "breaker". Per default, all stats are gathered.
#   # node_stats = ["jvm", "http"]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
#   ## Commands array
#   commands = [
#     "/tmp/test.sh",
#     "/usr/bin/mycollector --foo=bar",
#     "/tmp/collect_*.sh"
#   ]
#
#   ## Timeout for each command to complete.
#   timeout = "5s"
#
#   ## measurement name suffix (for separating different commands)
#   name_suffix = "_mycollector"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from fail2ban.
# [[inputs.fail2ban]]
#   ## Use sudo to run fail2ban-client
#   use_sudo = false


# # Read devices value(s) from a Fibaro controller
# [[inputs.fibaro]]
#   ## Required Fibaro controller address/hostname.
#   ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
#   url = "http://<controller>:80"
#
#   ## Required credentials to access the API (http://<controller>/api/<component>)
#   username = "<username>"
#   password = "<password>"
#
#   ## Amount of time allowed to complete the HTTP request
#   # timeout = "5s"


# # Reload and gather from file[s] on telegraf's interval.
# [[inputs.file]]
#   ## Files to parse each interval.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   /var/log/**.log     -> recursively find all .log files in /var/log
#   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
#   ##   /var/log/apache.log -> only read the apache log file
#   files = ["/var/log/apache/access.log"]
#
#   ## The dataformat to be read from files
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Count files in a directory
# [[inputs.filecount]]
#   ## Directory to gather stats about.
#   ##   deprecated in 1.9; use the directories option
#   directory = "/var/cache/apt/archives"
#
#   ## Directories to gather stats about.
#   ## This accepts standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   /var/log/**    -> recursively find all directories in /var/log and count files in each directory
#   ##   /var/log/*/*   -> find all directories with a parent dir in /var/log and count files in each directory
#   ##   /var/log       -> count all files in /var/log and all of its subdirectories
#   directories = ["/var/cache/apt/archives"]
#
#   ## Only count files that match the name pattern. Defaults to "*".
#   name = "*.deb"
#
#   ## Count files in subdirectories. Defaults to true.
#   recursive = false
#
#   ## Only count regular files. Defaults to true.
#   regular_only = true
#
#   ## Only count files that are at least this size. If size is
#   ## a negative number, only count files that are smaller than the
#   ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
#   ## Without quotes and units, interpreted as size in bytes.
#   size = "0B"
#
#   ## Only count files that have not been touched for at least this
#   ## duration. If mtime is negative, only count files that have been
#   ## touched in this duration. Defaults to "0s".
#   mtime = "0s"


# # Read stats about given file(s)
# [[inputs.filestat]]
#   ## Files to gather stats about.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
#   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
#   ##   "/var/log/apache.log" -> just tail the apache log file
#   ##
#   ## See https://github.com/gobwas/glob for more examples
#   ##
#   files = ["/var/log/**.log"]
#   ## If true, read the entire file and calculate an md5 checksum.
#   md5 = false


# # Read metrics exposed by fluentd in_monitor plugin
# [[inputs.fluentd]]
#   ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
#   ##
#   ## Endpoint:
#   ## - only one URI is allowed
#   ## - https is not supported
#   endpoint = "http://localhost:24220/api/plugins.json"
#
#   ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
#   exclude = [
#     "monitor_agent",
#     "dummy",
#   ]


# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
#   ## API endpoint, currently supported API:
#   ##
#   ##   - multiple  (Ex http://<host>:12900/system/metrics/multiple)
#   ##   - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
#   ##
#   ## For namespace endpoint, the metrics array will be ignored for that call.
#   ## The endpoint list can mix namespace and multiple type calls.
#   ##
#   ## Please check http://[graylog-server-ip]:12900/api-browser for full list
#   ## of endpoints
#   servers = [
#     "http://[graylog-server-ip]:12900/system/metrics/multiple",
#   ]
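#   ## e.g. a namespace endpoint (illustrative namespace):
#   # servers = ["http://[graylog-server-ip]:12900/system/metrics/namespace/jvm"]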
#
#   ## Metrics list
#   ## List of metrics can be found on Graylog webservice documentation.
#   ## Or by hitting the web service API at:
#   ##   http://[graylog-host]:12900/system/metrics
#   metrics = [
#     "jvm.cl.loaded",
#     "jvm.memory.pools.Metaspace.committed"
#   ]
#
#   ## Username and password
#   username = ""
#   password = ""
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics of haproxy, via socket or csv stats page
# [[inputs.haproxy]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
#   ## Make sure you specify the complete path to the stats endpoint
#   ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
#
#   ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
#   servers = ["http://myhaproxy.com:1936/haproxy?stats"]
#
#   ## Credentials for basic HTTP authentication
#   # username = "admin"
#   # password = "admin"
#
#   ## You can also use local socket with standard wildcard globbing.
#   ## Server addresses not starting with 'http' will be treated as possible
#   ## sockets, so both examples below are valid.
#   # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
#
#   ## By default, some of the fields are renamed from what haproxy calls them.
#   ## Setting this option to true results in the plugin keeping the original
#   ## field names.
#   # keep_field_names = false
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Monitor disks' temperatures using hddtemp
# [[inputs.hddtemp]]
#   ## By default, telegraf gathers temperature data from all disks detected
#   ## by hddtemp.
#   ##
#   ## Only collect temps from the selected disks.
#   ##
#   ## A * as the device name will return the temperature values of all disks.
#   ##
#   # address = "127.0.0.1:7634"
#   # devices = ["sda", "*"]


# # Read formatted metrics from one or more HTTP endpoints
# [[inputs.http]]
#   ## One or more URLs from which to read formatted metrics
#   urls = [
#     "http://localhost/metrics"
#   ]
#
#   ## HTTP method
#   # method = "GET"
#
#   ## Optional HTTP headers
#   # headers = {"X-Special-Header" = "Special-Value"}
#
#   ## Optional HTTP Basic Auth Credentials
#   # username = "username"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Amount of time allowed to complete the HTTP request
#   # timeout = "5s"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"


# # HTTP/HTTPS request given an address, a method and a timeout
# [[inputs.http_response]]
#   ## Server address (default http://localhost)
#   # address = "http://localhost"
#
#   ## Set http_proxy (telegraf uses the system-wide proxy settings if it is not set)
#   # http_proxy = "http://localhost:8888"
#
#   ## Set response_timeout (default 5 seconds)
#   # response_timeout = "5s"
#
#   ## HTTP Request Method
#   # method = "GET"
#
#   ## Whether to follow redirects from the server (defaults to false)
#   # follow_redirects = false
#
#   ## Optional HTTP Request Body
#   # body = '''
#   # {'fake':'data'}
#   # '''
#
#   ## Optional substring or regex match in body of the response
#   # response_string_match = "\"service_status\": \"up\""
#   # response_string_match = "ok"
#   # response_string_match = "\".*_status\".?:.?\"up\""
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## HTTP Request Headers (all values must be strings)
#   # [inputs.http_response.headers]
#   #   Host = "github.com"


# # Read flattened metrics from one or more JSON HTTP endpoints
# [[inputs.httpjson]]
#   ## NOTE This plugin only reads numerical measurements, strings and booleans
#   ## will be ignored.
#
#   ## Name for the service being polled.  Will be appended to the name of the
#   ## measurement e.g. httpjson_webserver_stats
#   ##
#   ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
#   name = "webserver_stats"
#
#   ## URL of each server in the service's cluster
#   servers = [
#     "http://localhost:9999/stats/",
#     "http://localhost:9998/stats/",
#   ]
#   ## Set response_timeout (default 5 seconds)
#   response_timeout = "5s"
#
#   ## HTTP method to use: GET or POST (case-sensitive)
#   method = "GET"
#
#   ## List of tag names to extract from top-level of JSON server response
#   # tag_keys = [
#   #   "my_tag_1",
#   #   "my_tag_2"
#   # ]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## HTTP parameters (all values must be strings).  For "GET" requests, data
#   ## will be included in the query.  For "POST" requests, data will be included
#   ## in the request body as "x-www-form-urlencoded".
#   # [inputs.httpjson.parameters]
#   #   event_type = "cpu_spike"
#   #   threshold = "0.75"
#
#   ## HTTP Headers (all values must be strings)
#   # [inputs.httpjson.headers]
#   #   X-Auth-Token = "my-xauth-token"
#   #   apiVersion = "v1"


# # Gather Icinga2 status
# [[inputs.icinga2]]
#   ## Required Icinga2 server address (default: "https://localhost:5665")
#   # server = "https://localhost:5665"
#
#   ## Required Icinga2 object type ("services" or "hosts", default "services")
#   # object_type = "services"
#
#   ## Credentials for basic HTTP authentication
#   # username = "admin"
#   # password = "admin"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = true


# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.influxdb]]
#   ## Works with InfluxDB debug endpoints out of the box,
#   ## but other services can use this format too.
#   ## See the influxdb plugin's README for more details.
#
#   ## Multiple URLs from which to read InfluxDB-formatted JSON
#   ## Default is "http://localhost:8086/debug/vars".
#   urls = [
#     "http://localhost:8086/debug/vars"
#   ]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## http request & header timeout
#   timeout = "5s"


# # Collect statistics about itself
# [[inputs.internal]]
#   ## If true, collect telegraf memory stats.
#   # collect_memstats = true


# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
# [[inputs.interrupts]]
#   ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
#   # [inputs.interrupts.tagdrop]
#     # irq = [ "NET_RX", "TASKLET" ]


# # Read metrics from the bare metal servers via IPMI
# [[inputs.ipmi_sensor]]
#   ## optionally specify the path to the ipmitool executable
#   # path = "/usr/bin/ipmitool"
#   ##
#   ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
#   # privilege = "ADMINISTRATOR"
#   ##
#   ## optionally specify one or more servers via a url matching
#   ##  [username[:password]@][protocol[(address)]]
#   ##  e.g.
#   ##    root:passwd@lan(127.0.0.1)
#   ##
#   ## if no servers are specified, local machine sensor stats will be queried
#   ##
#   # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
#   ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
#   ## gaps or overlap in pulled data
#   interval = "30s"
#
#   ## Timeout for the ipmitool command to complete
#   timeout = "20s"
#
#   ## Schema Version: (Optional, defaults to version 1)
#   metric_version = 2


# # Gather packets and bytes counters from Linux ipsets
# [[inputs.ipset]]
#   ## By default, we only show sets which have already matched at least 1 packet.
#   ## Set include_unmatched_sets = true to gather them all.
#   include_unmatched_sets = false
#   ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
#   use_sudo = false
#   ## The default timeout of 1s for ipset execution can be overridden here:
#   # timeout = "1s"


# # Gather packets and bytes throughput from iptables
# [[inputs.iptables]]
#   ## iptables require root access on most systems.
#   ## Setting 'use_sudo' to true will make use of sudo to run iptables.
#   ## Users must configure sudo to allow telegraf user to run iptables with no password.
#   ## iptables can be restricted to only list command "iptables -nvL".
#   use_sudo = false
#   ## Setting 'use_lock' to true runs iptables with the "-w" option.
#   ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
#   use_lock = false
#   ## Define an alternate executable, such as "ip6tables". Default is "iptables".
#   # binary = "ip6tables"
#   ## defines the table to monitor:
#   table = "filter"
#   ## defines the chains to monitor.
#   ## NOTE: iptables rules without a comment will not be monitored.
#   ## Read the plugin documentation for more information.
#   chains = [ "INPUT" ]


# # Collect virtual and real server stats from Linux IPVS
# [[inputs.ipvs]]
#   # no configuration


# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
#   # DEPRECATED: the jolokia plugin has been deprecated in favor of the
#   # jolokia2 plugin
#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
#
#   ## This is the context root used to compose the jolokia url
#   ## NOTE that Jolokia requires a trailing slash at the end of the context root
#   ## NOTE that your jolokia security policy must allow for POST requests.
#   context = "/jolokia/"
#
#   ## This specifies the mode used
#   # mode = "proxy"
#   #
#   ## When in proxy mode this section is used to specify further
#   ## proxy address configurations.
#   ## Remember to change host address to fit your environment.
#   # [inputs.jolokia.proxy]
#   #   host = "127.0.0.1"
#   #   port = "8080"
#
#   ## Optional http timeouts
#   ##
#   ## response_header_timeout, if non-zero, specifies the amount of time to wait
#   ## for a server's response headers after fully writing the request.
#   # response_header_timeout = "3s"
#   ##
#   ## client_timeout specifies a time limit for requests made by this client.
#   ## Includes connection time, any redirects, and reading the response body.
#   # client_timeout = "4s"
#
#   ## Attribute delimiter
#   ##
#   ## When multiple attributes are returned for a single
#   ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
#   ## name and the attribute name, separated by the given delimiter.
#   # delimiter = "_"
#
#   ## List of servers exposing jolokia read service
#   [[inputs.jolokia.servers]]
#     name = "as-server-01"
#     host = "127.0.0.1"
#     port = "8080"
#     # username = "myuser"
#     # password = "mypassword"
#
#   ## List of metrics collected on above servers
#   ## Each metric consists of a name, a jmx path, and either
#   ## a pass or drop slice attribute.
#   ## This collects all heap memory usage metrics.
#   [[inputs.jolokia.metrics]]
#     name = "heap_memory_usage"
#     mbean  = "java.lang:type=Memory"
#     attribute = "HeapMemoryUsage"
#
#   ## This collects thread count metrics.
#   [[inputs.jolokia.metrics]]
#     name = "thread_count"
#     mbean  = "java.lang:type=Threading"
#     attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
#
#   ## This collects class loaded/unloaded count metrics.
#   [[inputs.jolokia.metrics]]
#     name = "class_count"
#     mbean  = "java.lang:type=ClassLoading"
#     attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"


# # Read JMX metrics from a Jolokia REST agent endpoint
# [[inputs.jolokia2_agent]]
#   # default_tag_prefix      = ""
#   # default_field_prefix    = ""
#   # default_field_separator = "."
#
#   # Add agents' URLs to query
#   urls = ["http://localhost:8080/jolokia"]
#   # username = ""
#   # password = ""
#   # response_timeout = "5s"
#
#   ## Optional TLS config
#   # tls_ca   = "/var/private/ca.pem"
#   # tls_cert = "/var/private/client.pem"
#   # tls_key  = "/var/private/client-key.pem"
#   # insecure_skip_verify = false
#
#   ## Add metrics to read
#   [[inputs.jolokia2_agent.metric]]
#     name  = "java_runtime"
#     mbean = "java.lang:type=Runtime"
#     paths = ["Uptime"]


# # Read JMX metrics from a Jolokia REST proxy endpoint
# [[inputs.jolokia2_proxy]]
#   # default_tag_prefix      = ""
#   # default_field_prefix    = ""
#   # default_field_separator = "."
#
#   ## Proxy agent
#   url = "http://localhost:8080/jolokia"
#   # username = ""
#   # password = ""
#   # response_timeout = "5s"
#
#   ## Optional TLS config
#   # tls_ca   = "/var/private/ca.pem"
#   # tls_cert = "/var/private/client.pem"
#   # tls_key  = "/var/private/client-key.pem"
#   # insecure_skip_verify = false
#
#   ## Add proxy targets to query
#   # default_target_username = ""
#   # default_target_password = ""
#   [[inputs.jolokia2_proxy.target]]
#     url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
#     # username = ""
#     # password = ""
#
#   ## Add metrics to read
#   [[inputs.jolokia2_proxy.metric]]
#     name  = "java_runtime"
#     mbean = "java.lang:type=Runtime"
#     paths = ["Uptime"]


# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.kapacitor]]
#   ## Multiple URLs from which to read Kapacitor-formatted JSON
#   ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
#   urls = [
#     "http://localhost:9092/kapacitor/v1/debug/vars"
#   ]
#
#   ## Time limit for http requests
#   timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Get kernel statistics from /proc/vmstat
# [[inputs.kernel_vmstat]]
#   # no configuration


# # Read status information from one or more Kibana servers
# [[inputs.kibana]]
#   ## specify a list of one or more Kibana servers
#   servers = ["http://localhost:5601"]
#
#   ## Timeout for HTTP requests
#   timeout = "5s"
#
#   ## HTTP Basic Auth credentials
#   # username = "username"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from the kubernetes kubelet api
# [[inputs.kubernetes]]
#   ## URL for the kubelet
#   url = "http://1.1.1.1:10255"
#
#   ## Use bearer token for authorization
#   # bearer_token = "/path/to/bearer/token"
#
#   ## Set response_timeout (default 5 seconds)
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/path/to/cafile"
#   # tls_cert = "/path/to/certfile"
#   # tls_key = "/path/to/keyfile"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
#   ## An array of URLs of the form:
#   ##   host [ ":" port]
#   servers = ["127.0.0.1:4020"]


# # Provides Linux sysctl fs metrics
# [[inputs.linux_sysctl_fs]]
#   # no configuration


# # Read metrics from local Lustre service on OST, MDS
# [[inputs.lustre2]]
#   ## An array of /proc globs to search for Lustre stats
#   ## If not specified, the default will work on Lustre 2.5.x
#   ##
#   # ost_procfiles = [
#   #   "/proc/fs/lustre/obdfilter/*/stats",
#   #   "/proc/fs/lustre/osd-ldiskfs/*/stats",
#   #   "/proc/fs/lustre/obdfilter/*/job_stats",
#   # ]
#   # mds_procfiles = [
#   #   "/proc/fs/lustre/mdt/*/md_stats",
#   #   "/proc/fs/lustre/mdt/*/job_stats",
#   # ]


# # Gathers metrics from the /3.0/reports MailChimp API
# [[inputs.mailchimp]]
#   ## MailChimp API key
#   ## get from https://admin.mailchimp.com/account/api/
#   api_key = "" # required
#   ## Reports for campaigns sent more than days_old ago will not be collected.
#   ## 0 means collect all.
#   days_old = 0
#   ## Campaign ID to get; if empty, gets all campaigns. This option overrides days_old
#   # campaign_id = ""


# # Read metrics from one or many mcrouter servers
# [[inputs.mcrouter]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
#   servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
#
#   ## Timeout for metric collections from all servers.  Minimum timeout is "1s".
#   # timeout = "5s"


# # Read metrics from one or many memcached servers
# [[inputs.memcached]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with optional port. ie localhost, 10.0.0.1:11211, etc.
#   servers = ["localhost:11211"]
#   # unix_sockets = ["/var/run/memcached.sock"]


# # Telegraf plugin for gathering metrics from N Mesos masters
# [[inputs.mesos]]
#   ## Timeout, in ms.
#   timeout = 100
#   ## A list of Mesos masters.
#   masters = ["http://localhost:5050"]
#   ## Master metrics groups to be collected, by default, all enabled.
#   master_collections = [
#     "resources",
#     "master",
#     "system",
#     "agents",
#     "frameworks",
#     "tasks",
#     "messages",
#     "evqueue",
#     "registrar",
#   ]
#   ## A list of Mesos slaves, default is []
#   # slaves = []
#   ## Slave metrics groups to be collected, by default, all enabled.
#   # slave_collections = [
#   #   "resources",
#   #   "agent",
#   #   "system",
#   #   "executors",
#   #   "tasks",
#   #   "messages",
#   # ]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Collects scores from a minecraft server's scoreboard using the RCON protocol
# [[inputs.minecraft]]
#   ## server address for minecraft
#   # server = "localhost"
#   ## port for RCON
#   # port = "25575"
#   ## RCON password for the minecraft server
#   # password = ""


# # Read metrics from one or many MongoDB servers
# [[inputs.mongodb]]
#   ## An array of URLs of the form:
#   ##   "mongodb://" [user ":" pass "@"] host [ ":" port]
#   ## For example:
#   ##   mongodb://user:auth_key@10.10.3.30:27017,
#   ##   mongodb://10.10.3.33:18832,
#   servers = ["mongodb://127.0.0.1:27017"]
#
#   ## When true, collect per database stats
#   # gather_perdb_stats = false
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from one or many mysql servers
# [[inputs.mysql]]
#   ## specify servers via a url matching:
#   ##  [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
#   ##  see https://github.com/go-sql-driver/mysql#dsn-data-source-name
#   ##  e.g.
#   ##    servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
#   ##    servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
#   #
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["tcp(127.0.0.1:3306)/"]
#
#   ## Selects the metric output format.
#   ##
#   ## This option exists to maintain backwards compatibility; if you have
#   ## existing metrics, do not set or change this value until you are ready
#   ## to migrate to the new format.
#   ##
#   ## If you do not have existing metrics from this plugin, set to the latest
#   ## version.
#   ##
#   ## Telegraf >=1.6: metric_version = 2
#   ##           <1.6: metric_version = 1 (or unset)
#   metric_version = 2
#
#   ## the limits for metrics from perf_events_statements
#   perf_events_statements_digest_text_limit  = 120
#   perf_events_statements_limit              = 250
#   perf_events_statements_time_limit         = 86400
#   #
#   ## if the list is empty, then metrics are gathered from all database tables
#   table_schema_databases                    = []
#   #
#   ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases provided in the list above
#   gather_table_schema                       = false
#   #
#   ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
#   gather_process_list                       = true
#   #
#   ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
#   gather_user_statistics                    = true
#   #
#   ## gather auto_increment columns and max values from information schema
#   gather_info_schema_auto_inc               = true
#   #
#   ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
#   gather_innodb_metrics                     = true
#   #
#   ## gather metrics from SHOW SLAVE STATUS command output
#   gather_slave_status                       = true
#   #
#   ## gather metrics from SHOW BINARY LOGS command output
#   gather_binary_logs                        = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
#   gather_table_io_waits                     = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
#   gather_table_lock_waits                   = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
#   gather_index_io_waits                     = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
#   gather_event_waits                        = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
#   gather_file_events_stats                  = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
#   gather_perf_events_statements             = false
#   #
#   ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
#   interval_slow                   = "30m"
#
#   ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Provides metrics about the state of a NATS server
# [[inputs.nats]]
#   ## The address of the monitoring endpoint of the NATS server
#   server = "http://localhost:8222"
#
#   ## Maximum time to receive response
#   # response_timeout = "5s"


# # Read metrics about network interface usage
# [[inputs.net]]
#   ## By default, telegraf gathers stats from any up interface (excluding loopback)
#   ## Setting interfaces will tell it to gather these explicit interfaces,
#   ## regardless of status.
#   ##
#   # interfaces = ["eth0"]
#   ##
#   ## On linux systems telegraf also collects protocol stats.
#   ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
#   ##
#   # ignore_protocol_stats = false
#   ##


# # Collect response time of a TCP or UDP connection
# [[inputs.net_response]]
#   ## Protocol, must be "tcp" or "udp"
#   ## NOTE: because the "udp" protocol does not respond to requests, it requires
#   ## a send/expect string pair (see below).
#   protocol = "tcp"
#   ## Server address (default localhost)
#   address = "localhost:80"
#
#   ## Set timeout
#   # timeout = "1s"
#
#   ## Set read timeout (only used if expecting a response)
#   # read_timeout = "1s"
#
#   ## The following options are required for UDP checks. For TCP, they are
#   ## optional. The plugin will send the given string to the server and then
#   ## expect to receive the given 'expect' string back.
#   ## string sent to the server
#   # send = "ssh"
#   ## expected string in answer
#   # expect = "ssh"
#
#   ## Uncomment to remove deprecated fields
#   # fieldexclude = ["result_type", "string_found"]


# # Read TCP metrics such as established, time wait and sockets counts.
# [[inputs.netstat]]
#   # no configuration


# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
#   ## An array of Nginx stub_status URIs to gather stats.
#   urls = ["http://localhost/server_status"]
#
#   ## Optional TLS Config
#   tls_ca = "/etc/telegraf/ca.pem"
#   tls_cert = "/etc/telegraf/cert.cer"
#   tls_key = "/etc/telegraf/key.key"
#   ## Use TLS but skip chain & host verification
#   insecure_skip_verify = false
#
#   # HTTP response timeout (default: 5s)
#   response_timeout = "5s"


# # Read Nginx Plus' full status information (ngx_http_status_module)
# [[inputs.nginx_plus]]
#   ## An array of ngx_http_status_module status URIs to gather stats.
#   urls = ["http://localhost/status"]
#
#   # HTTP response timeout (default: 5s)
#   response_timeout = "5s"


# # Read Nginx Plus API status information (ngx_http_api_module)
# [[inputs.nginx_plus_api]]
#   ## An array of API URIs to gather stats.
#   urls = ["http://localhost/api"]
#
#   # Nginx API version, default: 3
#   # api_version = 3
#
#   # HTTP response timeout (default: 5s)
#   response_timeout = "5s"


# # Read Nginx virtual host traffic status module information (nginx-module-vts)
# [[inputs.nginx_vts]]
#   ## An array of nginx-module-vts status URIs to gather stats.
#   urls = ["http://localhost/status"]
#
#   ## HTTP response timeout (default: 5s)
#   response_timeout = "5s"


# # Read NSQ topic and channel statistics.
# [[inputs.nsq]]
#   ## An array of NSQD HTTP API endpoints
#   endpoints  = ["http://localhost:4151"]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Collect kernel snmp counters and network interface statistics
# [[inputs.nstat]]
#   ## file paths for proc files. If empty, default paths will be used:
#   ##    /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
#   ## These can also be overridden with env variables, see README.
#   proc_net_netstat = "/proc/net/netstat"
#   proc_net_snmp = "/proc/net/snmp"
#   proc_net_snmp6 = "/proc/net/snmp6"
#   ## dump metrics with 0 values too
#   dump_zeros       = true


# # Get standard NTP query metrics, requires ntpq executable.
# [[inputs.ntpq]]
#   ## If false, ntpq is run with the -n flag, skipping DNS lookups. Can reduce metric gather time.
#   dns_lookup = true


# # Pulls statistics from nvidia GPUs attached to the host
# [[inputs.nvidia_smi]]
#   ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
#   # bin_path = "/usr/bin/nvidia-smi"
#
#   ## Optional: timeout for GPU polling
#   # timeout = "5s"


# # OpenLDAP cn=Monitor plugin
# [[inputs.openldap]]
#   host = "localhost"
#   port = 389
#
#   # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
#   # note that port will likely need to be changed to 636 for ldaps
#   # valid options: "" | "starttls" | "ldaps"
#   tls = ""
#
#   # skip peer certificate verification. Default is false.
#   insecure_skip_verify = false
#
#   # Path to PEM-encoded Root certificate to use to verify server certificate
#   tls_ca = "/etc/ssl/certs.pem"
#
#   # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
#   bind_dn = ""
#   bind_password = ""
#
#   # Reverse metric names so they sort more naturally. Recommended.
#   # This defaults to false if unset, but is set to true when generating a new config
#   reverse_metric_names = true


# # A plugin to collect stats from OpenSMTPD - the OpenBSD SMTP mail server
# [[inputs.opensmtpd]]
#   ## If running as a restricted user you can prepend sudo for additional access:
#   #use_sudo = false
#
#   ## The default location of the smtpctl binary can be overridden with:
#   binary = "/usr/sbin/smtpctl"
#
#   ## The default timeout of 1000ms can be overridden with (in milliseconds):
#   timeout = 1000


# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
#   ## Path of passenger-status.
#   ##
#   ## The plugin gathers metrics by parsing the XML output of passenger-status.
#   ## More information about the tool:
#   ##   https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
#   ##
#   ## If no path is specified, then the plugin simply executes passenger-status,
#   ## which hopefully can be found in your PATH.
#   command = "passenger-status -v --show=xml"


# # Gather counters from PF
# [[inputs.pf]]
#   ## PF requires root access on most systems.
#   ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
#   ## Users must configure sudo to allow telegraf user to run pfctl with no password.
#   ## pfctl can be restricted to only list command "pfctl -s info".
#   use_sudo = false


# # Read metrics of phpfpm, via HTTP status page or socket
# [[inputs.phpfpm]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with optional port and path
#   ##
#   ## Plugin can be configured in three modes (any one can be used):
#   ##   - http: the URL must start with http:// or https://, ie:
#   ##       "http://localhost/status"
#   ##       "http://192.168.130.1/status?full"
#   ##
#   ##   - unixsocket: path to fpm socket, ie:
#   ##       "/var/run/php5-fpm.sock"
#   ##      or using a custom fpm status path:
#   ##       "/var/run/php5-fpm.sock:fpm-custom-status-path"
#   ##
#   ##   - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
#   ##       "fcgi://10.0.0.12:9000/status"
#   ##       "cgi://10.0.10.12:9001/status"
#   ##
#   ## Example of gathering from both a local socket and a remote host
#   ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
#   urls = ["http://localhost/status"]


# # Ping given url(s) and return statistics
# [[inputs.ping]]
#   ## List of urls to ping
#   urls = ["example.org"]
#
#   ## Number of pings to send per collection (ping -c <COUNT>)
#   # count = 1
#
#   ## Interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
#   ## Not available in Windows.
#   # ping_interval = 1.0
#
#   ## Per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
#   # timeout = 1.0
#
#   ## Total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
#   # deadline = 10
#
#   ## Interface or source address to send ping from (ping -I <INTERFACE/SRC_ADDR>)
#   ## on Darwin and FreeBSD, only a source address is possible: (ping -S <SRC_ADDR>)
#   # interface = ""
#
#   ## Specify the ping executable binary, default is "ping"
#   # binary = "ping"
#
#   ## Arguments for ping command
#   ## when arguments are given, other options (ping_interval, timeout, etc) will be ignored
#   # arguments = ["-c", "3"]


# # Measure postfix queue statistics
# [[inputs.postfix]]
#   ## Postfix queue directory. If not provided, telegraf will try to use
#   ## 'postconf -h queue_directory' to determine it.
#   # queue_directory = "/var/spool/postfix"


# # Read metrics from one or many PowerDNS servers
# [[inputs.powerdns]]
#   ## An array of sockets to gather stats about.
#   ## Specify a path to unix socket.
#   unix_sockets = ["/var/run/pdns.controlsocket"]


# # Monitor process cpu and memory usage
# [[inputs.procstat]]
#   ## PID file to monitor process
#   pid_file = "/var/run/nginx.pid"
#   ## executable name (ie, pgrep <exe>)
#   # exe = "nginx"
#   ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
#   # pattern = "nginx"
#   ## user as argument for pgrep (ie, pgrep -u <user>)
#   # user = "nginx"
#   ## Systemd unit name
#   # systemd_unit = "nginx.service"
#   ## CGroup name or path
#   # cgroup = "systemd/system.slice/nginx.service"
#
#   ## Windows service name
#   # win_service = ""
#
#   ## override for process_name
#   ## This is optional; default is sourced from /proc/<pid>/status
#   # process_name = "bar"
#
#   ## Field name prefix
#   # prefix = ""
#
#   ## Add PID as a tag instead of a field; useful to differentiate between
#   ## processes whose tags are otherwise the same.  Can create a large number
#   ## of series, use judiciously.
#   # pid_tag = false
#
#   ## Method to use when finding process IDs.  Can be one of 'pgrep', or
#   ## 'native'.  The pgrep finder calls the pgrep executable in the PATH while
#   ## the native finder performs the search directly in a manner dependent on the
#   ## platform.  Default is 'pgrep'
#   # pid_finder = "pgrep"


# # Reads the last_run_summary.yaml file and converts it to measurements
# [[inputs.puppetagent]]
#   ## Location of puppet last run summary file
#   location = "/var/lib/puppet/state/last_run_summary.yaml"


# # Reads metrics from RabbitMQ servers via the Management Plugin
# [[inputs.rabbitmq]]
#   ## Management Plugin url. (default: http://localhost:15672)
#   # url = "http://localhost:15672"
#   ## Tag added to rabbitmq_overview series; deprecated: use tags
#   # name = "rmq-server-1"
#   ## Credentials
#   # username = "guest"
#   # password = "guest"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional request timeouts
#   ##
#   ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
#   ## for a server's response headers after fully writing the request.
#   # header_timeout = "3s"
#   ##
#   ## client_timeout specifies a time limit for requests made by this client.
#   ## Includes connection time, any redirects, and reading the response body.
#   # client_timeout = "4s"
#
#   ## A list of nodes to gather as the rabbitmq_node measurement. If not
#   ## specified, metrics for all nodes are gathered.
#   # nodes = ["rabbit@node1", "rabbit@node2"]
#
#   ## A list of queues to gather as the rabbitmq_queue measurement. If not
#   ## specified, metrics for all queues are gathered.
#   # queues = ["telegraf"]
#
#   ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
#   ## specified, metrics for all exchanges are gathered.
#   # exchanges = ["telegraf"]
#
#   ## Queues to include and exclude. Globs accepted.
#   ## Note that an empty array for both will include all queues
#   queue_name_include = []
#   queue_name_exclude = []
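#
#   ## Example (hypothetical queue names): gather only telegraf-related queues:
#   # queue_name_include = ["telegraf*"]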


# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
# [[inputs.raindrops]]
#   ## An array of raindrops middleware URIs to gather stats.
#   urls = ["http://localhost:8080/_raindrops"]


# # Read metrics from one or many redis servers
# [[inputs.redis]]
#   ## specify servers via a url matching:
#   ##  [protocol://][:password]@address[:port]
#   ##  e.g.
#   ##    tcp://localhost:6379
#   ##    tcp://:password@192.168.99.100
#   ##    unix:///var/run/redis.sock
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no port is specified, 6379 is used
#   servers = ["tcp://localhost:6379"]
#
#   ## specify server password
#   # password = "s#cr@t%"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = true


# # Read metrics from one or many RethinkDB servers
# [[inputs.rethinkdb]]
#   ## An array of URIs to gather stats about. Specify an ip or hostname
#   ## with optional port and password. ie,
#   ##   rethinkdb://user:auth_key@10.10.3.30:28105,
#   ##   rethinkdb://10.10.3.33:18832,
#   ##   10.0.0.1:10000, etc.
#   servers = ["127.0.0.1:28015"]
#   ##
#   ## If you use a RethinkDB server > 2.3.0 with username/password authorization,
#   ## the protocol has to be named "rethinkdb2" - it will use the 1_0 handshake.
#   # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
#   ##
#   ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
#   ## has to be named "rethinkdb".
#   # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]


# # Read metrics from one or many Riak servers
# [[inputs.riak]]
#   # Specify a list of one or more riak http servers
#   servers = ["http://localhost:8098"]


# # Read API usage and limits for a Salesforce organisation
# [[inputs.salesforce]]
#   ## specify your credentials
#   ##
#   username = "your_username"
#   password = "your_password"
#   ##
#   ## (optional) security token
#   # security_token = "your_security_token"
#   ##
#   ## (optional) environment type (sandbox or production)
#   ## default is: production
#   ##
#   # environment = "production"
#   ##
#   ## (optional) API version (default: "39.0")
#   ##
#   # version = "39.0"


# # Monitor sensors, requires lm-sensors package
# [[inputs.sensors]]
#   ## Remove numbers from field names.
#   ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
#   # remove_numbers = true
#
#   ## Timeout is the maximum amount of time that the sensors command can run.
#   # timeout = "5s"


# # Read metrics from storage devices supporting S.M.A.R.T.
# [[inputs.smart]]
#   ## Optionally specify the path to the smartctl executable
#   # path = "/usr/bin/smartctl"
#   #
#   ## On most platforms smartctl requires root access.
#   ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
#   ## Sudo must be configured to allow the telegraf user to run smartctl
#   ## without a password.
#   # use_sudo = false
#   #
#   ## Skip checking disks in this power mode. Defaults to
#   ## "standby" to not wake up disks that have stoped rotating.
#   ## See --nocheck in the man pages for smartctl.
#   ## smartctl version 5.41 and 5.42 have faulty detection of
#   ## power mode and might require changing this value to
#   ## "never" depending on your disks.
#   # nocheck = "standby"
#   #
#   ## Gather detailed metrics for each SMART Attribute.
#   ## Defaults to "false"
#   ##
#   # attributes = false
#   #
#   ## Optionally specify devices to exclude from reporting.
#   # excludes = [ "/dev/pass6" ]
#   #
#   ## Optionally specify devices and device type, if unset
#   ## a scan (smartctl --scan) for S.M.A.R.T. devices will be
#   ## done and all devices found will be included, except those
#   ## listed in excludes.
#   # devices = [ "/dev/ada0 -d atacam" ]


# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
#   agents = [ "127.0.0.1:161" ]
#   ## Timeout for each SNMP query.
#   timeout = "5s"
#   ## Number of retries to attempt within timeout.
#   retries = 3
#   ## SNMP version, values can be 1, 2, or 3
#   version = 2
#
#   ## SNMP community string.
#   community = "public"
#
#   ## The GETBULK max-repetitions parameter
#   max_repetitions = 10
#
#   ## SNMPv3 auth parameters
#   #sec_name = "myuser"
#   #auth_protocol = "md5"      # Values: "MD5", "SHA", ""
#   #auth_password = "pass"
#   #sec_level = "authNoPriv"   # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
#   #context_name = ""
#   #priv_protocol = ""         # Values: "DES", "AES", ""
#   #priv_password = ""
#
#   ## measurement name
#   name = "system"
#   [[inputs.snmp.field]]
#     name = "hostname"
#     oid = ".1.0.0.1.1"
#   [[inputs.snmp.field]]
#     name = "uptime"
#     oid = ".1.0.0.1.2"
#   [[inputs.snmp.field]]
#     name = "load"
#     oid = ".1.0.0.1.3"
#   [[inputs.snmp.field]]
#     oid = "HOST-RESOURCES-MIB::hrMemorySize"
#
#   [[inputs.snmp.table]]
#     ## measurement name
#     name = "remote_servers"
#     inherit_tags = [ "hostname" ]
#     [[inputs.snmp.table.field]]
#       name = "server"
#       oid = ".1.0.0.0.1.0"
#       is_tag = true
#     [[inputs.snmp.table.field]]
#       name = "connections"
#       oid = ".1.0.0.0.1.1"
#     [[inputs.snmp.table.field]]
#       name = "latency"
#       oid = ".1.0.0.0.1.2"
#
#   [[inputs.snmp.table]]
#     ## auto populate table's fields using the MIB
#     oid = "HOST-RESOURCES-MIB::hrNetworkTable"


# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
# [[inputs.snmp_legacy]]
#   ## Use 'oids.txt' file to translate oids to names
#   ## To generate 'oids.txt' you need to run:
#   ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
#   ## Or if you have another MIB folder with custom MIBs
#   ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
#   snmptranslate_file = "/tmp/oids.txt"
#   [[inputs.snmp.host]]
#     address = "192.168.2.2:161"
#     # SNMP community
#     community = "public" # default public
#     # SNMP version (1, 2 or 3)
#     # Version 3 not supported yet
#     version = 2 # default 2
#     # SNMP response timeout
#     timeout = 2.0 # default 2.0
#     # SNMP request retries
#     retries = 2 # default 2
#     # Which get/bulk do you want to collect for this host
#     collect = ["mybulk", "sysservices", "sysdescr"]
#     # Simple list of OIDs to get, in addition to "collect"
#     get_oids = []
#
#   [[inputs.snmp.host]]
#     address = "192.168.2.3:161"
#     community = "public"
#     version = 2
#     timeout = 2.0
#     retries = 2
#     collect = ["mybulk"]
#     get_oids = [
#         "ifNumber",
#         ".1.3.6.1.2.1.1.3.0",
#     ]
#
#   [[inputs.snmp.get]]
#     name = "ifnumber"
#     oid = "ifNumber"
#
#   [[inputs.snmp.get]]
#     name = "interface_speed"
#     oid = "ifSpeed"
#     instance = "0"
#
#   [[inputs.snmp.get]]
#     name = "sysuptime"
#     oid = ".1.3.6.1.2.1.1.3.0"
#     unit = "second"
#
#   [[inputs.snmp.bulk]]
#     name = "mybulk"
#     max_repetition = 127
#     oid = ".1.3.6.1.2.1.1"
#
#   [[inputs.snmp.bulk]]
#     name = "ifoutoctets"
#     max_repetition = 127
#     oid = "ifOutOctets"
#
#   [[inputs.snmp.host]]
#     address = "192.168.2.13:161"
#     #address = "127.0.0.1:161"
#     community = "public"
#     version = 2
#     timeout = 2.0
#     retries = 2
#     #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
#     collect = ["sysuptime" ]
#     [[inputs.snmp.host.table]]
#       name = "iftable3"
#       include_instances = ["enp5s0", "eth1"]
#
#   # SNMP TABLEs
#   # table without mapping neither subtables
#   [[inputs.snmp.table]]
#     name = "iftable1"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#
#   # table without mapping but with subtables
#   [[inputs.snmp.table]]
#     name = "iftable2"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
#
#   # table with mapping but without subtables
#   [[inputs.snmp.table]]
#     name = "iftable3"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     # if empty, get all instances
#     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
#     # if empty, get all subtables
#
#   # table with both mapping and subtables
#   [[inputs.snmp.table]]
#     name = "iftable4"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     # if empty get all instances
#     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
#     # if empty get all subtables
#     # sub_tables need not be "real" subtables
#     sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]


# # Read stats from one or more Solr servers or cores
# [[inputs.solr]]
#   ## specify a list of one or more Solr servers
#   servers = ["http://localhost:8983"]
#
#   ## specify a list of one or more Solr cores (default - all)
#   # cores = ["main"]


# # Read metrics from Microsoft SQL Server
# [[inputs.sqlserver]]
#   ## Specify instances to monitor with a list of connection strings.
#   ## All connection parameters are optional.
#   ## By default, the host is localhost, listening on default port, TCP 1433.
#   ##   for Windows, the user is the currently running AD user (SSO).
#   ##   See https://github.com/denisenkom/go-mssqldb for detailed connection
#   ##   parameters.
#   # servers = [
#   #  "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
#   # ]
#
#   ## Optional parameter, setting this to 2 will use a new version
#   ## of the collection queries that break compatibility with the original
#   ## dashboards.
#   query_version = 2
#
#   ## If you are using AzureDB, setting this to true will gather resource utilization metrics
#   # azuredb = false
#
#   ## If you would like to exclude some of the metrics queries, list them here
#   ## Possible choices:
#   ## - PerformanceCounters
#   ## - WaitStatsCategorized
#   ## - DatabaseIO
#   ## - DatabaseProperties
#   ## - CPUHistory
#   ## - DatabaseSize
#   ## - DatabaseStats
#   ## - MemoryClerk
#   ## - VolumeSpace
#   ## - PerformanceMetrics
#   # exclude_query = [ 'DatabaseIO' ]


# # Sysstat metrics collector
# [[inputs.sysstat]]
#   ## Path to the sadc command.
#   #
#   ## Common Defaults:
#   ##   Debian/Ubuntu: /usr/lib/sysstat/sadc
#   ##   Arch:          /usr/lib/sa/sadc
#   ##   RHEL/CentOS:   /usr/lib64/sa/sadc
#   sadc_path = "/usr/lib/sa/sadc" # required
#   #
#   #
#   ## Path to the sadf command, if it is not in PATH
#   # sadf_path = "/usr/bin/sadf"
#   #
#   #
#   ## Activities is a list of activities that are passed as arguments to the
#   ## sadc collector utility (e.g. DISK, SNMP, etc.)
#   ## The more activities that are added, the more data is collected.
#   # activities = ["DISK"]
#   #
#   #
#   ## Group metrics to measurements.
#   ##
#   ## If group is false, each metric is prefixed with a description and is
#   ## itself a measurement.
#   ##
#   ## If Group is true, corresponding metrics are grouped to a single measurement.
#   # group = true
#   #
#   #
#   ## Options for the sadf command. The values on the left represent the sadf
#   ## options and the values on the right their description (which are used for
#   ## grouping and prefixing metrics).
#   ##
#   ## Run 'sar -h' or 'man sar' to find out the supported options for your
#   ## sysstat version.
#   [inputs.sysstat.options]
#     -C = "cpu"
#     -B = "paging"
#     -b = "io"
#     -d = "disk"             # requires DISK activity
#     "-n ALL" = "network"
#     "-P ALL" = "per_cpu"
#     -q = "queue"
#     -R = "mem"
#     -r = "mem_util"
#     -S = "swap_util"
#     -u = "cpu_util"
#     -v = "inode"
#     -W = "swap"
#     -w = "task"
#   #  -H = "hugepages"        # only available for newer linux distributions
#   #  "-I ALL" = "interrupts" # requires INT activity
#   #
#   #
#   ## Device tags can be used to add additional tags for devices.
#   ## For example the configuration below adds a tag vg with value rootvg for
#   ## all metrics with sda devices.
#   # [[inputs.sysstat.device_tags.sda]]
#   #  vg = "rootvg"


# # Reads metrics from a Teamspeak 3 Server via ServerQuery
# [[inputs.teamspeak]]
#   ## Server address for Teamspeak 3 ServerQuery
#   # server = "127.0.0.1:10011"
#   ## Username for ServerQuery
#   username = "serverqueryuser"
#   ## Password for ServerQuery
#   password = "secret"
#   ## Array of virtual servers
#   # virtual_servers = [1]


# # Read metrics about temperature
# [[inputs.temp]]
#   # no configuration


# # Read Tengine's basic status information (ngx_http_reqstat_module)
# [[inputs.tengine]]
#   # An array of Tengine reqstat module URIs to gather stats from.
#   urls = ["http://127.0.0.1/us"]
#
#   # HTTP response timeout (default: 5s)
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.cer"
#   # tls_key = "/etc/telegraf/key.key"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Gather metrics from the Tomcat server status page.
# [[inputs.tomcat]]
#   ## URL of the Tomcat server status
#   # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
#
#   ## HTTP Basic Auth Credentials
#   # username = "tomcat"
#   # password = "s3cret"
#
#   ## Request timeout
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
#   ## Set the amplitude
#   amplitude = 10.0


# # Read Twemproxy stats data
# [[inputs.twemproxy]]
#   ## Twemproxy stats address and port (no scheme)
#   addr = "localhost:22222"
#   ## Monitor pool name
#   pools = ["redis_pool", "mc_pool"]


# # A plugin to collect stats from the Unbound DNS resolver
# [[inputs.unbound]]
#   ## Address of server to connect to, read from unbound conf default, optionally ':port'
#   ## Will look up the IP if given a hostname
#   server = "127.0.0.1:8953"
#
#   ## If running as a restricted user you can prepend sudo for additional access:
#   # use_sudo = false
#
#   ## The default location of the unbound-control binary can be overridden with:
#   # binary = "/usr/sbin/unbound-control"
#
#   ## The default timeout of 1s can be overridden with:
#   # timeout = "1s"
#
#   ## When set to true, thread metrics are tagged with the thread id.
#   ##
#   ## The default is false for backwards compatibility, and will be changed to
#   ## true in a future version.  Setting it to true is recommended on new
#   ## deployments.
#   thread_as_tag = false


# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
#   ## If running as a restricted user you can prepend sudo for additional access:
#   #use_sudo = false
#
#   ## The default location of the varnishstat binary can be overridden with:
#   binary = "/usr/bin/varnishstat"
#
#   ## By default, telegraf gathers stats for 3 metric points.
#   ## Setting stats will override the defaults shown below.
#   ## Glob matching can be used, ie, stats = ["MAIN.*"]
#   ## stats may also be set to ["*"], which will collect all stats
#   stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
#
#   ## Optional name for the varnish instance (or working directory) to query
#   ## Usually appended after -n in the varnish cli
#   # instance_name = "instanceName"


# # Monitor wifi signal strength and quality
# [[inputs.wireless]]
#   ## Sets 'proc' directory path
#   ## If not specified, then default is /proc
#   # host_proc = "/proc"


# # Reads metrics from a SSL certificate
# [[inputs.x509_cert]]
#   ## List certificate sources
#   sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
#
#   ## Timeout for SSL connection
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
# [[inputs.zfs]]
#   ## ZFS kstat path. Ignored on FreeBSD
#   ## If not specified, then default is:
#   # kstatPath = "/proc/spl/kstat/zfs"
#
#   ## By default, telegraf gathers all zfs stats
#   ## If not specified, then default is:
#   # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
#   ## For Linux, the default is:
#   # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
#   #   "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
#   ## By default, don't gather zpool stats
#   # poolMetrics = false


# # Reads 'mntr' stats from one or many zookeeper servers
# [[inputs.zookeeper]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
#
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no port is specified, 2181 is used
#   servers = [":2181"]
#
#   ## Timeout for metric collections from all servers.  Minimum timeout is "1s".
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # enable_tls = true
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## If false, skip chain & host verification
#   # insecure_skip_verify = true
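#
#   ## A quick hedged check that a server actually exposes 'mntr'
#   ## (assumes netcat is installed):
#   ##   echo mntr | nc localhost 2181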



###############################################################################
#                            SERVICE INPUT PLUGINS                            #
###############################################################################

# # AMQP consumer plugin
# [[inputs.amqp_consumer]]
#   ## Broker to consume from.
#   ##   deprecated in 1.7; use the brokers option
#   # url = "amqp://localhost:5672/influxdb"
#
#   ## Brokers to consume from.  If multiple brokers are specified a random broker
#   ## will be selected anytime a connection is established.  This can be
#   ## helpful for load balancing when not using a dedicated load balancer.
#   brokers = ["amqp://localhost:5672/influxdb"]
#
#   ## Authentication credentials for the PLAIN auth_method.
#   # username = ""
#   # password = ""
#
#   ## Exchange to declare and consume from.
#   exchange = "telegraf"
#
#   ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
#   # exchange_type = "topic"
#
#   ## If true, exchange will be passively declared.
#   # exchange_passive = false
#
#   ## Exchange durability can be either "transient" or "durable".
#   # exchange_durability = "durable"
#
#   ## Additional exchange arguments.
#   # exchange_arguments = { }
#   # exchange_arguments = {"hash_propery" = "timestamp"}
#
#   ## AMQP queue name.
#   queue = "telegraf"
#
#   ## AMQP queue durability can be "transient" or "durable".
#   queue_durability = "durable"
#
#   ## Binding Key.
#   binding_key = "#"
#
#   ## Maximum number of messages server should give to the worker.
#   # prefetch_count = 50
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Auth method. PLAIN and EXTERNAL are supported
#   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
#   ## described here: https://www.rabbitmq.com/plugins.html
#   # auth_method = "PLAIN"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
#   ## DEPRECATED: The cassandra plugin has been deprecated.  Please use the
#   ## jolokia2 plugin instead.
#   ##
#   ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
#
#   context = "/jolokia/read"
#   ## List of cassandra servers exposing jolokia read service
#   servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
#   ## List of metrics collected on above servers
#   ## Each metric consists of a jmx path.
#   ## This will collect all heap memory usage metrics from the jvm and
#   ## ReadLatency metrics for all keyspaces and tables.
#   ## "type=Table" in the query works with Cassandra3.0. Older versions might
#   ## need to use "type=ColumnFamily"
#   metrics  = [
#     "/java.lang:type=Memory/HeapMemoryUsage",
#     "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
#   ]


# # Influx HTTP write listener
# [[inputs.http_listener]]
#   ## Address and port to host HTTP listener on
#   service_address = ":8186"
#
#   ## maximum duration before timing out read of the request
#   read_timeout = "10s"
#   ## maximum duration before timing out write of the response
#   write_timeout = "10s"
#
#   ## Maximum allowed http request body size in bytes.
#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
#   max_body_size = "500MiB"
#
#   ## Maximum line size allowed to be sent in bytes.
#   ## 0 means to use the default of 65536 bytes (64 kibibytes)
#   max_line_size = "64KiB"
#
#   ## Set one or more allowed client CA certificate file names to
#   ## enable mutually authenticated TLS connections
#   tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Add service certificate and key
#   tls_cert = "/etc/telegraf/cert.pem"
#   tls_key = "/etc/telegraf/key.pem"
#
#   ## Optional username and password to accept for HTTP basic authentication.
#   ## You probably want to make sure you have TLS configured above for this.
#   # basic_username = "foobar"
#   # basic_password = "barfoo"


# # Generic HTTP write listener
# [[inputs.http_listener_v2]]
#   ## Address and port to host HTTP listener on
#   service_address = ":8080"
#
#   ## Path to listen to.
#   # path = "/telegraf"
#
#   ## HTTP methods to accept.
#   # methods = ["POST", "PUT"]
#
#   ## maximum duration before timing out read of the request
#   # read_timeout = "10s"
#   ## maximum duration before timing out write of the response
#   # write_timeout = "10s"
#
#   ## Maximum allowed http request body size in bytes.
#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
#   # max_body_size = "500MB"
#
#   ## Set one or more allowed client CA certificate file names to
#   ## enable mutually authenticated TLS connections
#   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Add service certificate and key
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#
#   ## Optional username and password to accept for HTTP basic authentication.
#   ## You probably want to make sure you have TLS configured above for this.
#   # basic_username = "foobar"
#   # basic_password = "barfoo"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Influx HTTP write listener
# [[inputs.influxdb_listener]]
#   ## Address and port to host HTTP listener on
#   service_address = ":8186"
#
#   ## maximum duration before timing out read of the request
#   read_timeout = "10s"
#   ## maximum duration before timing out write of the response
#   write_timeout = "10s"
#
#   ## Maximum allowed http request body size in bytes.
#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
#   max_body_size = "500MiB"
#
#   ## Maximum line size allowed to be sent in bytes.
#   ## 0 means to use the default of 65536 bytes (64 kibibytes)
#   max_line_size = "64KiB"
#
#   ## Set one or more allowed client CA certificate file names to
#   ## enable mutually authenticated TLS connections
#   tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Add service certificate and key
#   tls_cert = "/etc/telegraf/cert.pem"
#   tls_key = "/etc/telegraf/key.pem"
#
#   ## Optional username and password to accept for HTTP basic authentication.
#   ## You probably want to make sure you have TLS configured above for this.
#   # basic_username = "foobar"
#   # basic_password = "barfoo"


# # Read JTI OpenConfig Telemetry from listed sensors
# [[inputs.jti_openconfig_telemetry]]
#   ## List of device addresses to collect telemetry from
#   servers = ["localhost:1883"]
#
#   ## Authentication details. Username and password are required if the device
#   ## expects authentication. The client ID must be unique when connecting from
#   ## multiple instances of telegraf to the same device.
#   username = "user"
#   password = "pass"
#   client_id = "telegraf"
#
#   ## Frequency to get data
#   sample_frequency = "1000ms"
#
#   ## Sensors to subscribe to
#   ## An identifier for each sensor can be provided in the path by separating
#   ## it with a space; otherwise the sensor path is used as the identifier.
#   ## When an identifier is used, a list of space-separated sensors can be given.
#   ## A single subscription is created with all these sensors, and the data is
#   ## saved to a measurement named after the identifier.
#   sensors = [
#    "/interfaces/",
#    "collection /components/ /lldp",
#   ]
#
#   ## A reporting rate can be specified per sensor group. To do this, specify
#   ## the reporting rate as a duration at the beginning of the sensor path /
#   ## collection name. Entries without a reporting rate use the configured
#   ## sample frequency.
#   sensors = [
#    "1000ms customReporting /interfaces /lldp",
#    "2000ms collection /components",
#    "/interfaces",
#   ]
#
#   ## x509 certificate to use with the TLS connection. If it is not provided,
#   ## an insecure channel will be opened with the server.
#   ssl_cert = "/etc/telegraf/cert.pem"
#
#   ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
#   ## Failed streams/calls will not be retried if 0 is provided
#   retry_delay = "1000ms"
#
#   ## To treat all string values as tags, set this to true
#   str_as_tags = false


# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
#   ## kafka servers
#   brokers = ["localhost:9092"]
#   ## topic(s) to consume
#   topics = ["telegraf"]
#
#   ## Optional Client id
#   # client_id = "Telegraf"
#
#   ## Set the minimal supported Kafka version.  Setting this enables the use of new
#   ## Kafka features and APIs.  Of particular interest, lz4 compression
#   ## requires at least version 0.10.0.0.
#   ##   ex: version = "1.1.0"
#   # version = ""
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional SASL Config
#   # sasl_username = "kafka"
#   # sasl_password = "secret"
#
#   ## the name of the consumer group
#   consumer_group = "telegraf_metrics_consumers"
#   ## Offset (must be either "oldest" or "newest")
#   offset = "oldest"
#   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
#   ## larger messages are dropped
#   max_message_len = 1000000
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer_legacy]]
#   ## topic(s) to consume
#   topics = ["telegraf"]
#   ## an array of Zookeeper connection strings
#   zookeeper_peers = ["localhost:2181"]
#   ## Zookeeper Chroot
#   zookeeper_chroot = ""
#   ## the name of the consumer group
#   consumer_group = "telegraf_metrics_consumers"
#   ## Offset (must be either "oldest" or "newest")
#   offset = "oldest"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
#
#   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
#   ## larger messages are dropped
#   max_message_len = 65536


# # Stream and parse log file(s).
# [[inputs.logparser]]
#   ## Log files to parse.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   /var/log/**.log     -> recursively find all .log files in /var/log
#   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
#   ##   /var/log/apache.log -> only tail the apache log file
#   files = ["/var/log/apache/access.log"]
#
#   ## Read files that currently exist from the beginning. Files that are created
#   ## while telegraf is running (and that match the "files" globs) will always
#   ## be read from the beginning.
#   from_beginning = false
#
#   ## Method used to watch for file updates.  Can be either "inotify" or "poll".
#   # watch_method = "inotify"
#
#   ## Parse logstash-style "grok" patterns:
#   [inputs.logparser.grok]
#     ## This is a list of patterns to check the given log file(s) for.
#     ## Note that adding patterns here increases processing time. The most
#     ## efficient configuration is to have one pattern per logparser.
#     ## Other common built-in patterns are:
#     ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
#     ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
#     patterns = ["%{COMBINED_LOG_FORMAT}"]
#
#     ## Name of the output measurement.
#     measurement = "apache_access_log"
#
#     ## Full path(s) to custom pattern files.
#     custom_pattern_files = []
#
#     ## Custom patterns can also be defined here. Put one pattern per line.
#     custom_patterns = '''
#     '''
#
#     ## Timezone allows you to provide an override for timestamps that
#     ## don't already include an offset
#     ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
#     ##
#     ## Default: "" which renders UTC
#     ## Options are as follows:
#     ##   1. Local             -- interpret based on machine localtime
#     ##   2. "Canada/Eastern"  -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
#     ##   3. UTC               -- or blank/unspecified, will return timestamp in UTC
#     # timezone = "Canada/Eastern"


# # Read metrics from MQTT topic(s)
[[inputs.mqtt_consumer]]
#   ## MQTT broker URLs to be used. The format should be scheme://host:port,
#   ## scheme can be tcp, ssl, or ws.
  servers = ["tcp://test.mosquitto.org:1883"]
#
#   ## QoS policy for messages
#   ##   0 = at most once
#   ##   1 = at least once
#   ##   2 = exactly once
#   ##
#   ## When using a QoS of 1 or 2, you should enable persistent_session to allow
#   ## resuming unacknowledged messages.
  qos = 0
#
#   ## Connection timeout for initial connection in seconds
  connection_timeout = "30s"
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
  ## Topics to subscribe to
  topics = [
    "wcl/ArgonDisplay",
  ]
#
#   # if true, messages that can't be delivered while the subscriber is offline
#   # will be delivered when it comes back (such as on service restart).
#   # NOTE: if true, client_id MUST be set
#   persistent_session = false
#   # If empty, a random client ID will be generated.
   client_id = "klmMac"
#
#   ## username and password to connect MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

  data_format = "value"
  data_type = "string"
  tag_keys = ["temp"]


# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
#   ## urls of NATS servers
#   servers = ["nats://localhost:4222"]
#   ## Use Transport Layer Security
#   secure = false
#   ## subject(s) to consume
#   subjects = ["telegraf"]
#   ## name a queue group
#   queue_group = "telegraf_consumers"
#
#   ## Sets the limits for pending msgs and bytes for each subscription
#   ## These shouldn't need to be adjusted except in very high throughput scenarios
#   # pending_message_limit = 65536
#   # pending_bytes_limit = 67108864
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
#   ## The server option still works but is deprecated; it is simply prepended
#   ## to the nsqd array.
#   # server = "localhost:4150"
#   ## An array representing the NSQD TCP endpoints
#   nsqd = ["localhost:4150"]
#   ## An array representing the NSQLookupd HTTP Endpoints
#   nsqlookupd = ["localhost:4161"]
#   topic = "telegraf"
#   channel = "consumer"
#   max_in_flight = 100
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output.  For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from one or many pgbouncer servers
# [[inputs.pgbouncer]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   ##
#   ## All connection parameters are optional.
#   ##
#   address = "host=localhost user=pgbouncer sslmode=disable"


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   ##
#   ## All connection parameters are optional.
#   ##
#   ## Without the dbname parameter, the driver will default to a database
#   ## with the same name as the user. This dbname is just for instantiating a
#   ## connection with the server and doesn't restrict the databases we are trying
#   ## to grab metrics for.
#   ##
#   address = "host=localhost user=postgres sslmode=disable"
#   ## A custom name for the database that will be used as the "server" tag in the
#   ## measurement output. If not specified, a default one generated from
#   ## the connection address is used.
#   # outputaddress = "db01"
#
#   ## connection configuration.
#   ## maxlifetime - specify the maximum lifetime of a connection.
#   ## default is forever (0s)
#   max_lifetime = "0s"
#
#   ## A list of databases to explicitly ignore.  If not specified, metrics for all
#   ## databases are gathered.  Do NOT use with the 'databases' option.
#   # ignored_databases = ["postgres", "template0", "template1"]
#
#   ## A list of databases to pull metrics about. If not specified, metrics for all
#   ## databases are gathered.  Do NOT use with the 'ignored_databases' option.
#   # databases = ["app_production", "testing"]


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   #
#   ## All connection parameters are optional.
#   ## Without the dbname parameter, the driver will default to a database
#   ## with the same name as the user. This dbname is just for instantiating a
#   ## connection with the server and doesn't restrict the databases we are trying
#   ## to grab metrics for.
#   #
#   address = "host=localhost user=postgres sslmode=disable"
#
#   ## connection configuration.
#   ## maxlifetime - specify the maximum lifetime of a connection.
#   ## default is forever (0s)
#   max_lifetime = "0s"
#
#   ## A list of databases to pull metrics about. If not specified, metrics for all
#   ## databases are gathered.
#   ## databases = ["app_production", "testing"]
#   #
#   ## A custom name for the database that will be used as the "server" tag in the
#   ## measurement output. If not specified, a default one generated from
#   ## the connection address is used.
#   # outputaddress = "db01"
#   #
#   ## Define the toml config where the sql queries are stored.
#   ## New queries can be added. If withdbname is set to true and no databases
#   ## are defined in the 'databases' field, the sql query is ended with
#   ## 'is not null' so that the query still succeeds.
#   ## Example:
#   ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
#   ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
#   ## because the databases variable was set to ['postgres', 'pgbench'] and
#   ## withdbname was true. If withdbname is set to false, do not define the
#   ## where clause (i.e. with the dbname). The tagvalue field is used to
#   ## define custom tags (separated by commas).
#   ## The optional "measurement" value can be used to override the default
#   ## output measurement name ("postgresql").
#   #
#   ## Structure :
#   ## [[inputs.postgresql_extensible.query]]
#   ##   sqlquery string
#   ##   version string
#   ##   withdbname boolean
#   ##   tagvalue string (comma separated)
#   ##   measurement string
#   [[inputs.postgresql_extensible.query]]
#     sqlquery="SELECT * FROM pg_stat_database"
#     version=901
#     withdbname=false
#     tagvalue=""
#     measurement=""
#   [[inputs.postgresql_extensible.query]]
#     sqlquery="SELECT * FROM pg_stat_bgwriter"
#     version=901
#     withdbname=false
#     tagvalue="postgresql.stats"


# # Read metrics from one or many prometheus clients
# [[inputs.prometheus]]
#   ## An array of urls to scrape metrics from.
#   urls = ["http://localhost:9100/metrics"]
#
#   ## An array of Kubernetes services to scrape metrics from.
#   # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
#
#   ## Kubernetes config file to create client from.
#   # kube_config = "/path/to/kubernetes.config"
#
#   ## Scrape Kubernetes pods for the following prometheus annotations:
#   ## - prometheus.io/scrape: Enable scraping for this pod
#   ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
#   ##     set this to 'https' & most likely set the tls config.
#   ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
#   ## - prometheus.io/port: If port is not 9102 use this annotation
#   # monitor_kubernetes_pods = true
#
#   ## Use bearer token for authorization
#   # bearer_token = /path/to/bearer/token
#
#   ## Specify timeout duration for slower prometheus clients (default is 3s)
#   # response_timeout = "3s"
#
#   ## Optional TLS Config
#   # tls_ca = /path/to/cafile
#   # tls_cert = /path/to/certfile
#   # tls_key = /path/to/keyfile
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
#   ## URL to listen on
#   # service_address = "tcp://:8094"
#   # service_address = "tcp://127.0.0.1:http"
#   # service_address = "tcp4://:8094"
#   # service_address = "tcp6://:8094"
#   # service_address = "tcp6://[2001:db8::1]:8094"
#   # service_address = "udp://:8094"
#   # service_address = "udp4://:8094"
#   # service_address = "udp6://:8094"
#   # service_address = "unix:///tmp/telegraf.sock"
#   # service_address = "unixgram:///tmp/telegraf.sock"
#
#   ## Maximum number of concurrent connections.
#   ## Only applies to stream sockets (e.g. TCP).
#   ## 0 (default) is unlimited.
#   # max_connections = 1024
#
#   ## Read timeout.
#   ## Only applies to stream sockets (e.g. TCP).
#   ## 0 (default) is unlimited.
#   # read_timeout = "30s"
#
#   ## Optional TLS configuration.
#   ## Only applies to stream sockets (e.g. TCP).
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key  = "/etc/telegraf/key.pem"
#   ## Enables client authentication if set.
#   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Maximum socket buffer size (in bytes when no unit specified).
#   ## For stream sockets, once the buffer fills up, the sender will start backing up.
#   ## For datagram sockets, once the buffer fills up, metrics will start dropping.
#   ## Defaults to the OS default.
#   # read_buffer_size = "64KiB"
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"


# # Statsd UDP/TCP Server
# [[inputs.statsd]]
#   ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
#   protocol = "udp"
#
#   ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
#   max_tcp_connections = 250
#
#   ## Enable TCP keep alive probes (default=false)
#   tcp_keep_alive = false
#
#   ## Specifies the keep-alive period for an active network connection.
#   ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
#   ## Defaults to the OS configuration.
#   # tcp_keep_alive_period = "2h"
#
#   ## Address and port to host UDP listener on
#   service_address = ":8125"
#
#   ## The following configuration options control when telegraf clears its cache
#   ## of previous values. If set to false, then telegraf will only clear its
#   ## cache when the daemon is restarted.
#   ## Reset gauges every interval (default=true)
#   delete_gauges = true
#   ## Reset counters every interval (default=true)
#   delete_counters = true
#   ## Reset sets every interval (default=true)
#   delete_sets = true
#   ## Reset timings & histograms every interval (default=true)
#   delete_timings = true
#
#   ## Percentiles to calculate for timing & histogram stats
#   percentiles = [90]
#
#   ## separator to use between elements of a statsd metric
#   metric_separator = "_"
#
#   ## Parses tags in the datadog statsd format
#   ## http://docs.datadoghq.com/guides/dogstatsd/
#   parse_data_dog_tags = false
#
#   ## Statsd data translation templates, more info can be read here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
#   # templates = [
#   #     "cpu.* measurement*"
#   # ]
#
#   ## Number of UDP messages allowed to queue up, once filled,
#   ## the statsd server will start dropping packets
#   allowed_pending_messages = 10000
#
#   ## Number of timing/histogram values to track per-measurement in the
#   ## calculation of percentiles. Raising this limit increases the accuracy
#   ## of percentiles but also increases the memory usage and cpu time.
#   percentile_limit = 1000
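#
#   ## A quick way to exercise the listener from a shell (assumes netcat;
#   ## the metric name is illustrative):
#   ##   echo "deploys.test.myservice:1|c" | nc -u -w1 localhost 8125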


# # Accepts syslog messages per RFC5425
# [[inputs.syslog]]
#   ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
#   ## Protocol, address and port to host the syslog receiver.
#   ## If no host is specified, then localhost is used.
#   ## If no port is specified, 6514 is used (RFC5425#section-4.1).
#   server = "tcp://:6514"
#
#   ## TLS Config
#   # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#
#   ## Period between keep alive probes.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   ## Only applies to stream sockets (e.g. TCP).
#   # keep_alive_period = "5m"
#
#   ## Maximum number of concurrent connections (default = 0).
#   ## 0 means unlimited.
#   ## Only applies to stream sockets (e.g. TCP).
#   # max_connections = 1024
#
#   ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
#   ## 0 means unlimited.
#   # read_timeout = "5s"
#
#   ## Whether to parse in best effort mode or not (default = false).
#   ## By default best effort parsing is off.
#   # best_effort = false
#
#   ## Character to prepend to SD-PARAMs (default = "_").
#   ## A syslog message can contain multiple parameters and multiple identifiers
#   ## within the structured data section.
#   ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
#   ## For each combination a field is created.
#   ## Its name is built by concatenating the identifier, sdparam_separator, and
#   ## the parameter name.
#   # sdparam_separator = "_"


# # Stream a log file, like the tail -f command
# [[inputs.tail]]
#   ## files to tail.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
#   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
#   ##   "/var/log/apache.log" -> just tail the apache log file
#   ##
#   ## See https://github.com/gobwas/glob for more examples
#   ##
#   files = ["/var/mymetrics.out"]
#   ## Read file from beginning.
#   from_beginning = false
#   ## Whether file is a named pipe
#   pipe = false
#
#   ## Method used to watch for file updates.  Can be either "inotify" or "poll".
#   # watch_method = "inotify"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Generic TCP listener
# [[inputs.tcp_listener]]
#   # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
#   # socket_listener plugin
#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # Generic UDP listener
# [[inputs.udp_listener]]
#   # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
#   # socket_listener plugin
#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # Read metrics from VMware vCenter
# [[inputs.vsphere]]
#   ## List of vCenter URLs to be monitored. These three lines must be uncommented
#   ## and edited for the plugin to work.
#   vcenters = [ "https://vcenter.local/sdk" ]
#   username = "user@corp.local"
#   password = "secret"
#
#   ## VMs
#   ## Typical VM metrics (if omitted or empty, all metrics are collected)
#   vm_metric_include = [
#     "cpu.demand.average",
#     "cpu.idle.summation",
#     "cpu.latency.average",
#     "cpu.readiness.average",
#     "cpu.ready.summation",
#     "cpu.run.summation",
#     "cpu.usagemhz.average",
#     "cpu.used.summation",
#     "cpu.wait.summation",
#     "mem.active.average",
#     "mem.granted.average",
#     "mem.latency.average",
#     "mem.swapin.average",
#     "mem.swapinRate.average",
#     "mem.swapout.average",
#     "mem.swapoutRate.average",
#     "mem.usage.average",
#     "mem.vmmemctl.average",
#     "net.bytesRx.average",
#     "net.bytesTx.average",
#     "net.droppedRx.summation",
#     "net.droppedTx.summation",
#     "net.usage.average",
#     "power.power.average",
#     "virtualDisk.numberReadAveraged.average",
#     "virtualDisk.numberWriteAveraged.average",
#     "virtualDisk.read.average",
#     "virtualDisk.readOIO.latest",
#     "virtualDisk.throughput.usage.average",
#     "virtualDisk.totalReadLatency.average",
#     "virtualDisk.totalWriteLatency.average",
#     "virtualDisk.write.average",
#     "virtualDisk.writeOIO.latest",
#     "sys.uptime.latest",
#   ]
#   # vm_metric_exclude = [] ## Nothing is excluded by default
#   # vm_instances = true ## true by default
#
#   ## Hosts
#   ## Typical host metrics (if omitted or empty, all metrics are collected)
#   host_metric_include = [
#     "cpu.coreUtilization.average",
#     "cpu.costop.summation",
#     "cpu.demand.average",
#     "cpu.idle.summation",
#     "cpu.latency.average",
#     "cpu.readiness.average",
#     "cpu.ready.summation",
#     "cpu.swapwait.summation",
#     "cpu.usage.average",
#     "cpu.usagemhz.average",
#     "cpu.used.summation",
#     "cpu.utilization.average",
#     "cpu.wait.summation",
#     "disk.deviceReadLatency.average",
#     "disk.deviceWriteLatency.average",
#     "disk.kernelReadLatency.average",
#     "disk.kernelWriteLatency.average",
#     "disk.numberReadAveraged.average",
#     "disk.numberWriteAveraged.average",
#     "disk.read.average",
#     "disk.totalReadLatency.average",
#     "disk.totalWriteLatency.average",
#     "disk.write.average",
#     "mem.active.average",
#     "mem.latency.average",
#     "mem.state.latest",
#     "mem.swapin.average",
#     "mem.swapinRate.average",
#     "mem.swapout.average",
#     "mem.swapoutRate.average",
#     "mem.totalCapacity.average",
#     "mem.usage.average",
#     "mem.vmmemctl.average",
#     "net.bytesRx.average",
#     "net.bytesTx.average",
#     "net.droppedRx.summation",
#     "net.droppedTx.summation",
#     "net.errorsRx.summation",
#     "net.errorsTx.summation",
#     "net.usage.average",
#     "power.power.average",
#     "storageAdapter.numberReadAveraged.average",
#     "storageAdapter.numberWriteAveraged.average",
#     "storageAdapter.read.average",
#     "storageAdapter.write.average",
#     "sys.uptime.latest",
#   ]
#   # host_metric_exclude = [] ## Nothing excluded by default
#   # host_instances = true ## true by default
#
#   ## Clusters
#   # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
#   # cluster_metric_exclude = [] ## Nothing excluded by default
#   # cluster_instances = true ## true by default
#
#   ## Datastores
#   # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
#   # datastore_metric_exclude = [] ## Nothing excluded by default
#   # datastore_instances = false ## false by default for Datastores only
#
#   ## Datacenters
#   datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
#   datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
#   # datacenter_instances = false ## false by default for Datacenters
#
#   ## Plugin Settings
#   ## separator character to use for measurement and field names (default: "_")
#   # separator = "_"
#
#   ## number of objects to retrieve per query for realtime resources (vms and hosts)
#   ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
#   # max_query_objects = 256
#
#   ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
#   ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
#   # max_query_metrics = 256
#
#   ## number of goroutines to use for collection and discovery of objects and metrics
#   # collect_concurrency = 1
#   # discover_concurrency = 1
#
#   ## whether or not to force discovery of new objects on the initial gather call,
#   ## before collecting metrics. When true, large environments may see errors due
#   ## to the time elapsed while collecting metrics. When false (the default), the
#   ## first collection cycle may return no or limited metrics while objects are
#   ## being discovered.
#   # force_discover_on_init = false
#
#   ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
#   # object_discovery_interval = "300s"
#
#   ## timeout applies to any of the api requests made to vcenter
#   # timeout = "20s"
#
#   ## Optional SSL Config
#   # ssl_ca = "/path/to/cafile"
#   # ssl_cert = "/path/to/certfile"
#   # ssl_key = "/path/to/keyfile"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # A Webhooks Event collector
# [[inputs.webhooks]]
#   ## Address and port to host Webhook listener on
#   service_address = ":1619"
#
#   [inputs.webhooks.filestack]
#     path = "/filestack"
#
#   [inputs.webhooks.github]
#     path = "/github"
#     # secret = ""
#
#   [inputs.webhooks.mandrill]
#     path = "/mandrill"
#
#   [inputs.webhooks.rollbar]
#     path = "/rollbar"
#
#   [inputs.webhooks.papertrail]
#     path = "/papertrail"
#
#   [inputs.webhooks.particle]
#     path = "/particle"


# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
# [[inputs.zipkin]]
#   # path = "/api/v1/spans" # URL path for span data
#   # port = 9411            # Port on which Telegraf listens

ini nginx

nginx.conf
server {
        listen 443 ssl;
        listen [::]:443 ssl;
        passenger_enabled on;
        ssl_certificate     /etc/nginx/sslnew/solenergy_mx.crt;
        ssl_certificate_key /etc/nginx/sslnew/solenergy_mx.key;
        server_name solenergy.mx;
        rails_env  production;

        root /opt/nginx/html/solcrm/public/;

        location = /50x.html {
              root   html;
        }
        client_max_body_size 4G;
        keepalive_timeout 10;

}
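
Validate the configuration and apply it (assumes nginx is on PATH and already running):

nginx -t && nginx -s reload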

ini ssl_apache2

apache2.conf
<IfModule mod_ssl.c>
<VirtualHost *:443>
    SSLEngine on
    SSLProtocol all -SSLv2 -SSLv3
    SSLCipherSuite ALL:!DH:!EXPORT:!RC4:+HIGH:+MEDIUM:!LOW:!aNULL:!eNULL

    SSLCertificateFile    /etc/apache2/ssl/solenergy_mx.crt
    SSLCertificateKeyFile /etc/apache2/ssl/solenergy_mx.key
    SSLCACertificateFile  /etc/apache2/ssl/solenergy_mx.ca-bundle

    ServerName solshop.solenergy.mx
    ServerAdmin webmaster@solenergy.mx
    DocumentRoot /var/www/html/solshop/

    <Directory /var/www/html/solshop/>
        Options Indexes FollowSymLinks
        AllowOverride All
        Require all granted
    </Directory>

    <IfModule mod_dir.c>
        DirectoryIndex index.php index.pl index.cgi index.html index.xhtml index.htm
    </IfModule>
</VirtualHost>
</IfModule>
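
On Debian/Ubuntu the SSL module must be enabled before this vhost loads; a typical sequence (assumes the apache2 helper tools are installed):

a2enmod ssl
apachectl configtest && systemctl reload apache2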

ini Load titanic.csv into Elasticsearch via Logstash

titanic.conf
input {
    file {
        path => ["/home/ladmin/titanic/*"]
        sincedb_path => ["/home/ladmin/titanic/sincedb"]
        start_position => "beginning"
        codec => plain {
            charset => "UTF-8"
        }
    }
}

filter {
    csv {
        columns => ["PassengerId","Survived","Pclass","Name","Sex","Age",
                    "SibSp","Parch","Ticket","Fare","Cabin","Embarked"]
    }
}

output {
    elasticsearch {
        index => "passengers"
        document_id => "%{PassengerId}"
    }
}
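
To run the pipeline (the path to the logstash binary is an assumption):

bin/logstash -f titanic.conf

Once indexed, the document count can be spot-checked against Elasticsearch:

curl localhost:9200/passengers/_count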

ini Django pytest

pytest.ini
[pytest]
DJANGO_SETTINGS_MODULE=prjname.settings
python_files = tests.py test_*.py *_tests.py

Run pytest, or pytest -s to show stdout.
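
A minimal test sketch that the python_files patterns above would collect (the model use and assertions are illustrative, assuming pytest-django is installed):

# tests.py
import pytest
from django.contrib.auth.models import User

@pytest.mark.django_db  # pytest-django marker granting database access
def test_create_user():
    user = User.objects.create_user(username="alice", password="secret")
    assert user.username == "alice"
    assert User.objects.count() == 1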

ini Git Aliases

.gitconfig
[alias]
    co = checkout
    ci = commit
    st = status
    br = branch
    hist = log --pretty=format:\"%h %ad | %s%d [%an]\" --graph --date=short
    type = cat-file -t
    dump = cat-file -p
    show-tree = log --oneline --graph --color --all --decorate

    rbm = rebase master
    rbi = rebase -i
    rb = rebase
    rbc = rebase --continue
    rba = rebase --abort
    rbs = rebase --skip

    rs = reset
    rs-1 = reset --soft HEAD~1
    rss = reset HEAD^

    po = push origin
    pom = push origin master

    plom = pull origin master
    plum = pull upstream master

    cob = checkout -b
    com = checkout master
    cu = "!git branch --merged | grep -v '*' | xargs -n 1 git branch -d"

    cia = commit --amend
    ciane = commit --amend --no-edit
    cim = commit -m
    cimorig = commit -c ORIG_HEAD

    a = add
    aa = add .
    ch = cherry-pick
    chc = cherry-pick --continue
    cha = cherry-pick --abort

    fup = fetch upstream
    for = fetch origin
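
Usage is just the alias after git; for example (branch name illustrative):

git hist
git cob feature/login
git cu    # delete local branches already merged into the current branch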

ini Python logging configuration file

logging.ini
[loggers]
keys=root,app

[handlers]
keys=fileHandler,consoleHandler

[formatters]
keys=defaultFormatter

[logger_root]
level=DEBUG
handlers=fileHandler

[logger_app]
level=DEBUG
handlers=consoleHandler,fileHandler
qualname=app
propagate=0

[handler_consoleHandler]
class=StreamHandler
args=(sys.stdout,)
level=DEBUG
formatter=defaultFormatter

[handler_fileHandler]
class=FileHandler
args=('logging.log',)
level=DEBUG
formatter=defaultFormatter

[formatter_defaultFormatter]
format=%(asctime)s %(name)-12s %(levelname)-8s %(message)s
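
A minimal loader for this file (the filename matches the snippet; the 'app' logger name comes from the config above):

# main.py
import logging.config

logging.config.fileConfig("logging.ini", disable_existing_loggers=False)
log = logging.getLogger("app")
log.debug("configured from logging.ini")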

ini Squid configuration

squid.conf
#	WELCOME TO SQUID 3.5.12
#	----------------------------
#	
#	This is the documentation for the Squid configuration file.
#	This documentation can also be found online at:
#		http://www.squid-cache.org/Doc/config/
#	
#	You may wish to look at the Squid home page and wiki for the
#	FAQ and other documentation:
#		http://www.squid-cache.org/
#		http://wiki.squid-cache.org/SquidFaq
#		http://wiki.squid-cache.org/ConfigExamples
#	
#	This documentation shows what the defaults for various directives
#	happen to be.  If you don't need to change the default, you should
#	leave the line out of your squid.conf in most cases.
#	
#	In some cases "none" refers to no default setting at all,
#	while in other cases it refers to the value of the option
#	- the comments for that keyword indicate if this is the case.
#

#  Configuration options can be included using the "include" directive.
#  Include takes a list of files to include. Quoting and wildcards are
#  supported.
#
#  For example,
#
#  include /path/to/included/file/squid.acl.config
#
#  Includes can be nested up to a hard-coded depth of 16 levels.
#  This arbitrary restriction is to prevent recursive include references
#  from causing Squid to enter an infinite loop whilst trying to load
#  configuration files.
#
#  Values with byte units
#
#	Squid accepts size units on some size related directives. All
#	such directives are documented with a default value displaying
#	a unit.
#
#	Units accepted by Squid are:
#		bytes - byte
#		KB - Kilobyte (1024 bytes)
#		MB - Megabyte
#		GB - Gigabyte
#
#  Values with spaces, quotes, and other special characters
#
#	Squid supports directive parameters with spaces, quotes, and other
#	special characters. Surround such parameters with "double quotes". Use
#	the configuration_includes_quoted_values directive to enable or
#	disable that support.
#
#	Squid supports reading configuration option parameters from external
#	files using the syntax:
#		parameters("/path/filename")
#	For example:
#		acl whitelist dstdomain parameters("/etc/squid/whitelist.txt")
#
#  Conditional configuration
#
#	If-statements can be used to make configuration directives
#	depend on conditions:
#
#	    if <CONDITION>
#	        ... regular configuration directives ...
#	    [else
#	        ... regular configuration directives ...]
#	    endif
#
#	The else part is optional. The keywords "if", "else", and "endif"
#	must be typed on their own lines, as if they were regular
#	configuration directives.
#
#	NOTE: An else-if condition is not supported.
#
#	These individual conditions types are supported:
#
#	    true
#		Always evaluates to true.
#	    false
#		Always evaluates to false.
#	    <integer> = <integer>
#	        Equality comparison of two integer numbers.
#
#
#  SMP-Related Macros
#
#	The following SMP-related preprocessor macros can be used.
#
#	${process_name} expands to the current Squid process "name"
#	(e.g., squid1, squid2, or cache1).
#
#	${process_number} expands to the current Squid process
#	identifier, which is an integer number (e.g., 1, 2, 3) unique
#	across all Squid processes of the current service instance.
#
#	${service_name} expands into the current Squid service instance
#	name identifier which is provided by -n on the command line.
#
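#	For example, a sketch combining a conditional with the
#	${process_number} macro to apply directives to the first SMP
#	worker only (assumes two or more workers are configured):
#
#	    if ${process_number} = 1
#	        ... directives for the first worker only ...
#	    endif
#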

#  TAG: broken_vary_encoding
#	This option is not yet supported by Squid-3.
#Default:
# none

#  TAG: cache_vary
#	This option is not yet supported by Squid-3.
#Default:
# none

#  TAG: error_map
#	This option is not yet supported by Squid-3.
#Default:
# none

#  TAG: external_refresh_check
#	This option is not yet supported by Squid-3.
#Default:
# none

#  TAG: location_rewrite_program
#	This option is not yet supported by Squid-3.
#Default:
# none

#  TAG: refresh_stale_hit
#	This option is not yet supported by Squid-3.
#Default:
# none

#  TAG: hierarchy_stoplist
#	Remove this line. Use always_direct or cache_peer_access ACLs instead if you need to prevent cache_peer use.
#Default:
# none

#  TAG: log_access
#	Remove this line. Use acls with access_log directives to control access logging
#Default:
# none

#  TAG: log_icap
#	Remove this line. Use acls with icap_log directives to control icap logging
#Default:
# none

#  TAG: ignore_ims_on_miss
#	Remove this line. The HTTP/1.1 feature is now configured by 'cache_miss_revalidate'.
#Default:
# none

#  TAG: chunked_request_body_max_size
#	Remove this line. Squid is now HTTP/1.1 compliant.
#Default:
# none

#  TAG: dns_v4_fallback
#	Remove this line. Squid performs a 'Happy Eyeballs' algorithm, the 'fallback' algorithm is no longer relevant.
#Default:
# none

#  TAG: emulate_httpd_log
#	Replace this with an access_log directive using the format 'common' or 'combined'.
#Default:
# none

#  TAG: forward_log
#	Use a regular access.log with ACL limiting it to MISS events.
#Default:
# none

#  TAG: ftp_list_width
#	Remove this line. Configure FTP page display using the CSS controls in errorpages.css instead.
#Default:
# none

#  TAG: ignore_expect_100
#	Remove this line. The HTTP/1.1 feature is now fully supported by default.
#Default:
# none

#  TAG: log_fqdn
#	Remove this option from your config. To log FQDN use %>A in the log format.
#Default:
# none

#  TAG: log_ip_on_direct
#	Remove this option from your config. To log server or peer names use %<A in the log format.
#Default:
# none

#  TAG: maximum_single_addr_tries
#	Replaced by connect_retries. The behaviour has changed, please read the documentation before altering.
#Default:
# none

#  TAG: referer_log
#	Replace this with an access_log directive using the format 'referrer'.
#Default:
# none

#  TAG: update_headers
#	Remove this line. The feature is supported by default in storage types where update is implemented.
#Default:
# none

#  TAG: url_rewrite_concurrency
#	Remove this line. Set the 'concurrency=' option of url_rewrite_children instead.
#Default:
# none

#  TAG: useragent_log
#	Replace this with an access_log directive using the format 'useragent'.
#Default:
# none

#  TAG: dns_testnames
#	Remove this line. DNS is no longer tested on startup.
#Default:
# none

#  TAG: extension_methods
#	Remove this line. All valid methods for HTTP are accepted by default.
#Default:
# none

#  TAG: zero_buffers
#Default:
# none

#  TAG: incoming_rate
#Default:
# none

#  TAG: server_http11
#	Remove this line. HTTP/1.1 is supported by default.
#Default:
# none

#  TAG: upgrade_http0.9
#	Remove this line. ICY/1.0 streaming protocol is supported by default.
#Default:
# none

#  TAG: zph_local
#	Alter these entries. Use the qos_flows directive instead.
#Default:
# none

#  TAG: header_access
#	Since squid-3.0 replace with request_header_access or reply_header_access
#	depending on whether you wish to match client requests or server replies.
#Default:
# none

#  TAG: httpd_accel_no_pmtu_disc
#	Since squid-3.0 use the 'disable-pmtu-discovery' flag on http_port instead.
#Default:
# none

#  TAG: wais_relay_host
#	Replace this line with 'cache_peer' configuration.
#Default:
# none

#  TAG: wais_relay_port
#	Replace this line with 'cache_peer' configuration.
#Default:
# none

# OPTIONS FOR SMP
# -----------------------------------------------------------------------------

#  TAG: workers
#	Number of main Squid processes or "workers" to fork and maintain.
#	0: "no daemon" mode, like running "squid -N ..."
#	1: "no SMP" mode, start one main Squid process daemon (default)
#	N: start N main Squid process daemons (i.e., SMP mode)
#
#	In SMP mode, each worker does nearly everything a single Squid daemon
#	does (e.g., listen on http_port and forward HTTP requests).
#Default:
# SMP support disabled.

#  TAG: cpu_affinity_map
#	Usage: cpu_affinity_map process_numbers=P1,P2,... cores=C1,C2,...
#
#	Sets 1:1 mapping between Squid processes and CPU cores. For example,
#
#	    cpu_affinity_map process_numbers=1,2,3,4 cores=1,3,5,7
#
#	affects processes 1 through 4 only and places them on every other
#	core, starting with core #1 (i.e., cores 1, 3, 5, and 7).
#
#	CPU cores are numbered starting from 1. Requires support for
#	sched_getaffinity(2) and sched_setaffinity(2) system calls.
#
#	Multiple cpu_affinity_map options are merged.
#
#	See also: workers
#Default:
# Let operating system decide.
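
# A minimal SMP sketch (values are illustrative, not recommendations):
# run two workers and pin them to the first two CPU cores.
#
#workers 2
#cpu_affinity_map process_numbers=1,2 cores=1,2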

# OPTIONS FOR AUTHENTICATION
# -----------------------------------------------------------------------------

#  TAG: auth_param
#	This is used to define parameters for the various authentication
#	schemes supported by Squid.
#
#		format: auth_param scheme parameter [setting]
#
#	The order in which authentication schemes are presented to the client is
#	dependent on the order the scheme first appears in the config file. IE
#	has a bug (it's not RFC 2617 compliant) in that it will use the basic
#	scheme if basic is the first entry presented, even if more secure
#	schemes are presented. For now use the order in the recommended
#	settings section below. If other browsers have difficulties (don't
#	recognize the schemes offered even if you are using basic) either
#	put basic first, or disable the other schemes (by commenting out their
#	program entry).
#
#	Once an authentication scheme is fully configured, it can only be
#	shutdown by shutting squid down and restarting. Changes can be made on
#	the fly and activated with a reconfigure. I.E. You can change to a
#	different helper, but not unconfigure the helper completely.
#
#	Please note that while this directive defines how Squid processes
#	authentication it does not automatically activate authentication.
#	To use authentication you must in addition make use of ACLs based
#	on login name in http_access (proxy_auth, proxy_auth_regex or
#	external with %LOGIN used in the format tag). The browser will be
#	challenged for authentication on the first such acl encountered
#	in http_access processing and will also be re-challenged for new
#	login credentials if the request is being denied by a proxy_auth
#	type acl.
#
#	WARNING: authentication can't be used in a transparently intercepting
#	proxy as the client then thinks it is talking to an origin server and
#	not the proxy. This is a limitation of bending the TCP/IP protocol to
#	transparently intercepting port 80, not a limitation in Squid.
#	Ports flagged 'transparent', 'intercept', or 'tproxy' have
#	authentication disabled.
#
#	=== Parameters common to all schemes. ===
#
#	"program" cmdline
#		Specifies the command for the external authenticator.
#
#		By default, each authentication scheme is not used unless a
#		program is specified.
#
#		See http://wiki.squid-cache.org/Features/AddonHelpers for
#		more details on helper operations and creating your own.
#
#	"key_extras" format
#		Specifies a string to be appended to the request line format for
#		the authentication helper. "Quoted" format values may contain
#		spaces and logformat %macros. In theory, any logformat %macro
#		can be used. In practice, a %macro expands as a dash (-) if
#		the helper request is sent before the required macro
#		information is available to Squid.
#
#		By default, Squid uses request formats provided in
#		scheme-specific examples below (search for %credentials).
#
#		The expanded key_extras value is added to the Squid credentials
#		cache and, hence, will affect authentication. It can be used to
#		authenticate different users with identical user names (e.g.,
#		when user authentication depends on http_port).
#
#		Avoid adding frequently changing information to key_extras. For
#		example, if you add user source IP, and it changes frequently
#		in your environment, then max_user_ip ACL is going to treat
#		every user+IP combination as a unique "user", breaking the ACL
#		and wasting a lot of memory on those user records. It will also
#		force users to authenticate from scratch whenever their IP
#		changes.
#
#	"realm" string
#		Specifies the protection scope (aka realm name) which is to be
#		reported to the client for the authentication scheme. It is
#		commonly part of the text the user will see when prompted for
#		their username and password.
#
#		For Basic the default is "Squid proxy-caching web server".
#		For Digest there is no default, this parameter is mandatory.
#		For NTLM and Negotiate this parameter is ignored.
#
#	"children" numberofchildren [startup=N] [idle=N] [concurrency=N]
#
#		The maximum number of authenticator processes to spawn. If
#		you start too few Squid will have to wait for them to process
#		a backlog of credential verifications, slowing it down. When
#		password verifications are done via a (slow) network you are
#		likely to need lots of authenticator processes.
#
#		The startup= and idle= options permit some skew in the exact
#		amount run. A minimum of startup=N will begin during startup
#		and reconfigure. Squid will start more in groups of up to
#		idle=N in an attempt to meet traffic needs and to keep idle=N
#		free above those traffic needs up to the maximum.
#
#		The concurrency= option sets the number of concurrent requests
#		the helper can process.  The default of 0 is used for helpers
#		that only support one request at a time. Setting this to a
#		number greater than 0 changes the protocol used to include a
#		channel ID field first on the request/response line, allowing
#		multiple requests to be sent to the same helper in parallel
#		without waiting for the response.
#
#		Concurrency must not be set unless it's known the helper
#		supports the input format with channel-ID fields.
#
#		NOTE: NTLM and Negotiate schemes do not support concurrency
#			in the Squid code module even though some helpers can.
#
#
#
#	=== Example Configuration ===
#
#	This configuration displays the recommended authentication scheme
#	order from most to least secure with recommended minimum configuration
#	settings for each scheme:
#
##auth_param negotiate program <uncomment and complete this line to activate>
##auth_param negotiate children 20 startup=0 idle=1
##auth_param negotiate keep_alive on
##
##auth_param digest program <uncomment and complete this line to activate>
##auth_param digest children 20 startup=0 idle=1
##auth_param digest realm Squid proxy-caching web server
##auth_param digest nonce_garbage_interval 5 minutes
##auth_param digest nonce_max_duration 30 minutes
##auth_param digest nonce_max_count 50
##
##auth_param ntlm program <uncomment and complete this line to activate>
##auth_param ntlm children 20 startup=0 idle=1
##auth_param ntlm keep_alive on
##
##auth_param basic program <uncomment and complete this line>
##auth_param basic children 5 startup=5 idle=1
##auth_param basic realm Squid proxy-caching web server
##auth_param basic credentialsttl 2 hours
#Default:
# none
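
# A concrete sketch for Basic authentication (the helper and password
# file paths are assumptions; they vary by distribution):
#
#auth_param basic program /usr/lib/squid/basic_ncsa_auth /etc/squid/passwd
#auth_param basic children 5 startup=5 idle=1
#acl authenticated proxy_auth REQUIRED
#http_access allow authenticated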

#  TAG: authenticate_cache_garbage_interval
#	The time period between garbage collection across the username cache.
#	This is a trade-off between memory utilization (long intervals - say
#	2 days) and CPU (short intervals - say 1 minute). Only change if you
#	have good reason to.
#Default:
# authenticate_cache_garbage_interval 1 hour

#  TAG: authenticate_ttl
#	The time a user & their credentials stay in the logged in
#	user cache since their last request. When the garbage
#	interval passes, all user credentials that have passed their
#	TTL are removed from memory.
#Default:
# authenticate_ttl 1 hour

#  TAG: authenticate_ip_ttl
#	If you use proxy authentication and the 'max_user_ip' ACL,
#	this directive controls how long Squid remembers the IP
#	addresses associated with each user.  Use a small value
#	(e.g., 60 seconds) if your users might change addresses
#	quickly, as is the case with dialup.   You might be safe
#	using a larger value (e.g., 2 hours) in a corporate LAN
#	environment with relatively static address assignments.
#Default:
# authenticate_ip_ttl 1 second

# ACCESS CONTROLS
# -----------------------------------------------------------------------------

#  TAG: external_acl_type
#	This option defines external acl classes using a helper program
#	to look up the status
#
#	  external_acl_type name [options] FORMAT.. /path/to/helper [helper arguments..]
#
#	Options:
#
#	  ttl=n		TTL in seconds for cached results (defaults to 3600
#	  		for 1 hour)
#
#	  negative_ttl=n
#	  		TTL for cached negative lookups (default same
#	  		as ttl)
#
#	  grace=n	Percentage remaining of TTL where a refresh of a
#			cached entry should be initiated without needing to
#			wait for a new reply. (default is for no grace period)
#
#	  cache=n	Limit the result cache size, default is 262144.
#			The expanded FORMAT value is used as the cache key, so
#			if the details in FORMAT are highly variable a larger
#			cache may be needed to produce reduction in helper load.
#
#	  children-max=n
#			Maximum number of acl helper processes spawned to service
#			external acl lookups of this type. (default 20)
#
#	  children-startup=n
#			Minimum number of acl helper processes to spawn during
#			startup and reconfigure to service external acl lookups
#			of this type. (default 0)
#
#	  children-idle=n
#			Number of acl helper processes to keep ahead of traffic
#			loads. Squid will spawn this many at once whenever load
#			rises above the capabilities of existing processes.
#			Up to the value of children-max. (default 1)
#
#	  concurrency=n	concurrency level per process. Only used with helpers
#			capable of processing more than one query at a time.
#
#	  protocol=2.5	Compatibility mode for Squid-2.5 external acl helpers.
#
#	  ipv4 / ipv6	IP protocol used to communicate with this helper.
#			The default is to auto-detect IPv6 and use it when available.
#
#
#	FORMAT specifications
#
#	  %LOGIN	Authenticated user login name
#	  %un		A user name. Expands to the first available name
#	  		from the following list of information sources:
#			- authenticated user name, like %ul or %LOGIN
#			- user name sent by an external ACL, like %EXT_USER
#			- SSL client name, like %us in logformat
#			- ident user name, like %ui in logformat
#	  %EXT_USER	Username from previous external acl
#	  %EXT_LOG	Log details from previous external acl
#	  %EXT_TAG	Tag from previous external acl
#	  %IDENT	Ident user name
#	  %SRC		Client IP
#	  %SRCPORT	Client source port
#	  %URI		Requested URI
#	  %DST		Requested host
#	  %PROTO	Requested URL scheme
#	  %PORT		Requested port
#	  %PATH		Requested URL path (including query-string if any)
#	  %METHOD	Request method
#	  %MYADDR	Squid interface address
#	  %MYPORT	Squid http_port number
#	  %USER_CERT	SSL User certificate in PEM format
#	  %USER_CERTCHAIN SSL User certificate chain in PEM format
#	  %USER_CERT_xx	SSL User certificate subject attribute xx
#	  %USER_CA_CERT_xx SSL User certificate issuer attribute xx
#	  %ssl::>sni	SSL client SNI sent to Squid
#	  %ssl::<cert_subject SSL server certificate DN
#	  %ssl::<cert_issuer SSL server certificate issuer DN
#
#	  %>{Header}	HTTP request header "Header"
#	  %>{Hdr:member}
#	  		HTTP request header "Hdr" list member "member"
#	  %>{Hdr:;member}
#	  		HTTP request header list member using ; as
#	  		list separator. ; can be any non-alphanumeric
#			character.
#
#	  %<{Header}	HTTP reply header "Header"
#	  %<{Hdr:member}
#	  		HTTP reply header "Hdr" list member "member"
#	  %<{Hdr:;member}
#	  		HTTP reply header list member using ; as
#	  		list separator. ; can be any non-alphanumeric
#			character.
#
#	  %ACL		The name of the ACL being tested.
#	  %DATA		The ACL arguments. If not used then any arguments
#			are automatically added at the end of the line
#			sent to the helper.
#			NOTE: this will encode the arguments as one token,
#			whereas the default will pass each separately.
#
#	  %%		The percent sign. Useful for helpers which need
#			an unchanging input format.
#
#
#	General request syntax:
#
#	  [channel-ID] FORMAT-values [acl-values ...]
#
#
#	FORMAT-values consists of transaction details expanded with
#	whitespace separation per the config file FORMAT specification
#	using the FORMAT macros listed above.
#
#	acl-values consists of any string specified in the referencing
#	config 'acl ... external' line. see the "acl external" directive.
#
#	Request values sent to the helper are URL escaped to protect
#	each value in requests against whitespaces.
#
#	If using protocol=2.5 then the request sent to the helper is not
#	URL escaped to protect against whitespace.
#
#	NOTE: protocol=3.0 is deprecated as no longer necessary.
#
#	When using the concurrency= option the protocol is changed by
#	introducing a query channel tag in front of the request/response.
#	The query channel tag is a number between 0 and concurrency-1.
#	This value must be echoed back unchanged to Squid as the first part
#	of the response relating to its request.
#
#
#	The helper receives lines expanded per the above format specification
#	and for each input line returns 1 line starting with OK/ERR/BH result
#	code and optionally followed by additional keywords with more details.
#
#
#	General result syntax:
#
#	  [channel-ID] result keyword=value ...
#
#	Result consists of one of the codes:
#
#	  OK
#		the ACL test produced a match.
#
#	  ERR
#		the ACL test does not produce a match.
#
#	  BH
#		An internal error occurred in the helper, preventing
#		a result being identified.
#
#	The meaning of 'a match' is determined by your squid.conf
#	access control configuration. See the Squid wiki for details.
#
#	Defined keywords:
#
#	  user=		The user's name (login)
#
#	  password=	The user's password (for login= cache_peer option)
#
#	  message=	Message describing the reason for this response.
#			Available as %o in error pages.
#			Useful on (ERR and BH results).
#
#	  tag=		Apply a tag to a request. Only sets a tag once,
#			does not alter existing tags.
#
#	  log=		String to be logged in access.log. Available as
#	  		%ea in logformat specifications.
#
#  	  clt_conn_tag= Associates a TAG with the client TCP connection.
#			Please see url_rewrite_program related documentation
#			for this kv-pair.
#
#	Any keywords may be sent on any response whether OK, ERR or BH.
#
#	All response keyword values need to be a single token with URL
#	escaping, or enclosed in double quotes (") and escaped using \ on
#	any double quotes or \ characters within the value. The wrapping
#	double quotes are removed before the value is interpreted by Squid.
#	\r and \n are also replaced by CR and LF.
#
#	Some example key values:
#
#		user=John%20Smith
#		user="John Smith"
#		user="J. \"Bob\" Smith"
#Default:
# none
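
# A minimal sketch wiring an external ACL (the helper path and names are
# hypothetical). For each request the helper receives a line such as
# "jsmith admins" and answers OK or ERR:
#
#external_acl_type group_check ttl=600 %LOGIN /usr/local/bin/squid_group_helper
#acl in_admins external group_check admins
#http_access allow in_admins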

#  TAG: acl
#	Defining an Access List
#
#	Every access list definition must begin with an aclname and acltype, 
#	followed by either type-specific arguments or a quoted filename that
#	they are read from.
#
#	   acl aclname acltype argument ...
#	   acl aclname acltype "file" ...
#
#	When using "file", the file should contain one item per line.
#
#	Some acl types support options which change their default behaviour.
#	The available options are:
#
#	-i,+i	By default, regular expressions are CASE-SENSITIVE. To make them
#		case-insensitive, use the -i option. To return case-sensitive
#		use the +i option between patterns, or make a new ACL line
#		without -i.	
#
#	-n	Disable lookups and address type conversions.  If lookup or
#		conversion is required because the parameter type (IP or
#		domain name) does not match the message address type (domain
#		name or IP), then the ACL would immediately declare a mismatch
#		without any warnings or lookups.
#
#	--	Used to stop processing all options, in the case the first acl
#		value has '-' character as first character (for example the '-'
#		is a valid domain name)
#
#	Some acl types require suspending the current request in order
#	to access some external data source.
#	Those which do are marked with the tag [slow], those which
#	don't are marked as [fast].
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl
#	for further information
#
#	***** ACL TYPES AVAILABLE *****
#
#	acl aclname src ip-address/mask ...	# clients IP address [fast]
#	acl aclname src addr1-addr2/mask ...	# range of addresses [fast]
#	acl aclname dst [-n] ip-address/mask ...	# URL host's IP address [slow]
#	acl aclname localip ip-address/mask ... # IP address the client connected to [fast]
#
#	acl aclname arp      mac-address ... (xx:xx:xx:xx:xx:xx notation)
#	  # [fast]
#	  # The 'arp' ACL code is not portable to all operating systems.
#	  # It works on Linux, Solaris, Windows, FreeBSD, and some other
#	  # BSD variants.
#	  #
#	  # NOTE: Squid can only determine the MAC/EUI address for IPv4
#	  # clients that are on the same subnet. If the client is on a
#	  # different subnet, then Squid cannot find out its address.
#	  #
#	  # NOTE 2: IPv6 protocol does not contain ARP. MAC/EUI is either
#	  # encoded directly in the IPv6 address or not available.
#
#	acl aclname srcdomain   .foo.com ...
#	  # reverse lookup, from client IP [slow]
#	acl aclname dstdomain [-n] .foo.com ...
#	  # Destination server from URL [fast]
#	acl aclname srcdom_regex [-i] \.foo\.com ...
#	  # regex matching client name [slow]
#	acl aclname dstdom_regex [-n] [-i] \.foo\.com ...
#	  # regex matching server [fast]
#	  #
#	  # For dstdomain and dstdom_regex a reverse lookup is tried if an
#	  # IP-based URL is used and no match is found. The name "none" is used
#	  # if the reverse lookup fails.
#
#	acl aclname src_as number ...
#	acl aclname dst_as number ...
#	  # [fast]
#	  # Except for access control, AS numbers can be used for
#	  # routing of requests to specific caches. Here's an
#	  # example for routing all requests for AS#1241 and only
#	  # those to mycache.mydomain.net:
#	  # acl asexample dst_as 1241
#	  # cache_peer_access mycache.mydomain.net allow asexample
#	  # cache_peer_access mycache_mydomain.net deny all
#
#	acl aclname peername myPeer ...
#	  # [fast]
#	  # match against a named cache_peer entry
#	  # set unique name= on cache_peer lines for reliable use.
#
#	acl aclname time [day-abbrevs] [h1:m1-h2:m2]
#	  # [fast]
#	  #  day-abbrevs:
#	  #	S - Sunday
#	  #	M - Monday
#	  #	T - Tuesday
#	  #	W - Wednesday
#	  #	H - Thursday
#	  #	F - Friday
#	  #	A - Saturday
#	  #  h1:m1 must be less than h2:m2
#
#	acl aclname url_regex [-i] ^http:// ...
#	  # regex matching on whole URL [fast]
#	acl aclname urllogin [-i] [^a-zA-Z0-9] ...
#	  # regex matching on URL login field
#	acl aclname urlpath_regex [-i] \.gif$ ...
#	  # regex matching on URL path [fast]
#
#	acl aclname port 80 70 21 0-1024...   # destination TCP port [fast]
#	                                      # ranges are allowed
#	acl aclname localport 3128 ...	      # TCP port the client connected to [fast]
#	                                      # NP: for interception mode this is usually '80'
#
#	acl aclname myportname 3128 ...       # *_port name [fast]
#
#	acl aclname proto HTTP FTP ...        # request protocol [fast]
# 
#	acl aclname method GET POST ...       # HTTP request method [fast]
#
#	acl aclname http_status 200 301 500- 400-403 ... 
#	  # status code in reply [fast]
#
#	acl aclname browser [-i] regexp ...
#	  # pattern match on User-Agent header (see also req_header below) [fast]
#
#	acl aclname referer_regex [-i] regexp ...
#	  # pattern match on Referer header [fast]
#	  # Referer is highly unreliable, so use with care
#
#	acl aclname ident username ...
#	acl aclname ident_regex [-i] pattern ...
#	  # string match on ident output [slow]
#	  # use REQUIRED to accept any non-null ident.
#
#	acl aclname proxy_auth [-i] username ...
#	acl aclname proxy_auth_regex [-i] pattern ...
#	  # perform http authentication challenge to the client and match against
#	  # supplied credentials [slow]
#	  #
#	  # takes a list of allowed usernames.
#	  # use REQUIRED to accept any valid username.
#	  #
#	  # Will use proxy authentication in forward-proxy scenarios, and plain
#	  # http authentication in reverse-proxy scenarios
#	  #
#	  # NOTE: when a Proxy-Authentication header is sent but it is not
#	  # needed during ACL checking the username is NOT logged
#	  # in access.log.
#	  #
#	  # NOTE: proxy_auth requires a EXTERNAL authentication program
#	  # to check username/password combinations (see
#	  # auth_param directive).
#	  #
#	  # NOTE: proxy_auth can't be used in a transparent/intercepting proxy
#	  # as the browser needs to be configured for using a proxy in order
#	  # to respond to proxy authentication.
#
#	acl aclname snmp_community string ...
#	  # A community string to limit access to your SNMP Agent [fast]
#	  # Example:
#	  #
#	  #	acl snmppublic snmp_community public
#
#	acl aclname maxconn number
#	  # This will be matched when the client's IP address has
#	  # more than <number> TCP connections established. [fast]
#	  # NOTE: This only measures direct TCP links so X-Forwarded-For
#	  # indirect clients are not counted.
#
#	acl aclname max_user_ip [-s] number
#	  # This will be matched when the user attempts to log in from more
#	  # than <number> different ip addresses. The authenticate_ip_ttl
#	  # parameter controls the timeout on the ip entries. [fast]
#	  # If -s is specified the limit is strict, denying browsing
#	  # from any further IP addresses until the ttl has expired. Without
#	  # -s Squid will just annoy the user by "randomly" denying requests.
#	  # (the counter is reset each time the limit is reached and a
#	  # request is denied)
#	  # NOTE: in acceleration mode or where there is mesh of child proxies,
#	  # clients may appear to come from multiple addresses if they are
#	  # going through proxy farms, so a limit of 1 may cause user problems.
#
#	acl aclname random probability
#	  # Pseudo-randomly match requests. Based on the probability given.
#	  # Probability may be written as a decimal (0.333), fraction (1/3)
#	  # or ratio of matches:non-matches (3:5).
#
#	acl aclname req_mime_type [-i] mime-type ...
#	  # regex match against the mime type of the request generated
#	  # by the client. Can be used to detect file upload or some
#	  # types of HTTP tunneling requests [fast]
#	  # NOTE: This does NOT match the reply. You cannot use this
#	  # to match the returned file type.
#
#	acl aclname req_header header-name [-i] any\.regex\.here
#	  # regex match against any of the known request headers.  May be
#	  # thought of as a superset of "browser", "referer" and "mime-type"
#	  # ACL [fast]
#
#	acl aclname rep_mime_type [-i] mime-type ...
#	  # regex match against the mime type of the reply received by
#	  # squid. Can be used to detect file download or some
#	  # types of HTTP tunneling requests. [fast]
#	  # NOTE: This has no effect in http_access rules. It only has
#	  # effect in rules that affect the reply data stream such as
#	  # http_reply_access.
#
#	acl aclname rep_header header-name [-i] any\.regex\.here
#	  # regex match against any of the known reply headers. May be
#	  # thought of as a superset of "browser", "referer" and "mime-type"
#	  # ACLs [fast]
#
#	acl aclname external class_name [arguments...]
#	  # external ACL lookup via a helper class defined by the
#	  # external_acl_type directive [slow]
#
#	acl aclname user_cert attribute values...
#	  # match against attributes in a user SSL certificate
#	  # attribute is one of DN/C/O/CN/L/ST or a numerical OID [fast]
#
#	acl aclname ca_cert attribute values...
#	  # match against attributes in a user's issuing CA SSL certificate
#	  # attribute is one of DN/C/O/CN/L/ST or a numerical OID  [fast]
#
#	acl aclname ext_user username ...
#	acl aclname ext_user_regex [-i] pattern ...
#	  # string match on username returned by external acl helper [slow]
#	  # use REQUIRED to accept any non-null user name.
#
#	acl aclname tag tagvalue ...
#	  # string match on tag returned by external acl helper [fast]
#	  # DEPRECATED. Only the first tag will match with this ACL.
#	  # Use the 'note' ACL instead for handling multiple tag values.
#
#	acl aclname hier_code codename ...
#	  # string match against squid hierarchy code(s); [fast]
#	  #  e.g., DIRECT, PARENT_HIT, NONE, etc.
#	  #
#	  # NOTE: This has no effect in http_access rules. It only has
#	  # effect in rules that affect the reply data stream such as
#	  # http_reply_access.
#
#	acl aclname note name [value ...]
#	  # match transaction annotation [fast]
#	  # Without values, matches any annotation with a given name.
#	  # With value(s), matches any annotation with a given name that
#	  # also has one of the given values.
#	  # Names and values are compared using a string equality test.
#	  # Annotation sources include note and adaptation_meta directives
#	  # as well as helper and eCAP responses.
#
#	acl aclname adaptation_service service ...
#	  # Matches the name of any icap_service, ecap_service,
#	  # adaptation_service_set, or adaptation_service_chain that Squid
#	  # has used (or attempted to use) for the master transaction.
#	  # This ACL must be defined after the corresponding adaptation
#	  # service is named in squid.conf. This ACL is usable with
#	  # adaptation_meta because it starts matching immediately after
#	  # the service has been selected for adaptation.
#
#	acl aclname any-of acl1 acl2 ...
#	  # match any one of the acls [fast or slow]
#	  # The first matching ACL stops further ACL evaluation.
#	  #
#	  # ACLs from multiple any-of lines with the same name are ORed.
#	  # For example, A = (a1 or a2) or (a3 or a4) can be written as
#	  #   acl A any-of a1 a2
#	  #   acl A any-of a3 a4
#	  #
#	  # This group ACL is fast if all evaluated ACLs in the group are fast
#	  # and slow otherwise.
#
#	acl aclname all-of acl1 acl2 ... 
#	  # match all of the acls [fast or slow]
#	  # The first mismatching ACL stops further ACL evaluation.
#	  #
#	  # ACLs from multiple all-of lines with the same name are ORed.
#	  # For example, B = (b1 and b2) or (b3 and b4) can be written as
#	  #   acl B all-of b1 b2
#	  #   acl B all-of b3 b4
#	  #
#	  # This group ACL is fast if all evaluated ACLs in the group are fast
#	  # and slow otherwise.
#
#	Examples:
#		acl macaddress arp 09:00:2b:23:45:67
#		acl myexample dst_as 1241
#		acl password proxy_auth REQUIRED
#		acl fileupload req_mime_type -i ^multipart/form-data$
#		acl javascript rep_mime_type -i ^application/x-javascript$
#
#Default:
# ACLs all, manager, localhost, and to_localhost are predefined.
#
#
# Recommended minimum configuration:
#

# Example rule allowing access from your local networks.
# Adapt to list your (internal) IP networks from where browsing
# should be allowed
#acl localnet src 10.0.0.0/8	# RFC1918 possible internal network
#acl localnet src 172.16.0.0/12	# RFC1918 possible internal network
#acl localnet src 192.168.0.0/16	# RFC1918 possible internal network
#acl localnet src fc00::/7       # RFC 4193 local private network range
#acl localnet src fe80::/10      # RFC 4291 link-local (directly plugged) machines

#acl SSL_ports port 443
#acl Safe_ports port 80		# http
#acl Safe_ports port 21		# ftp
#acl Safe_ports port 443		# https
#acl Safe_ports port 70		# gopher
#acl Safe_ports port 210		# wais
#acl Safe_ports port 1025-65535	# unregistered ports
#acl Safe_ports port 280		# http-mgmt
#acl Safe_ports port 488		# gss-http
#acl Safe_ports port 591		# filemaker
#acl Safe_ports port 777		# multiling http
#acl CONNECT method CONNECT

#  TAG: proxy_protocol_access
#	Determine which client proxies can be trusted to provide correct
#	information regarding real client IP address using PROXY protocol.
#
#	Requests may pass through a chain of several other proxies
#	before reaching us. The original source details may be sent in:
#		* HTTP message Forwarded header, or
#		* HTTP message X-Forwarded-For header, or
#		* PROXY protocol connection header.
#
#	This directive is solely for validating new PROXY protocol
#	connections received from a port flagged with require-proxy-header.
#	It is checked only once after TCP connection setup.
#
#	A deny match results in TCP connection closure.
#
#	An allow match is required for Squid to permit the corresponding
#	TCP connection, before Squid even looks for HTTP request headers.
#	If there is an allow match, Squid starts using PROXY header information
#	to determine the source address of the connection for all future ACL
#	checks, logging, etc.
#
#	SECURITY CONSIDERATIONS:
#
#		Any host from which we accept client IP details can place
#		incorrect information in the relevant header, and Squid
#		will use the incorrect information as if it were the
#		source address of the request.  This may enable remote
#		hosts to bypass any access control restrictions that are
#		based on the client's source addresses.
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# all TCP connections to ports with require-proxy-header will be denied
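
# Example sketch: trust PROXY protocol headers only from a known load
# balancer (the address is an assumption):
#
#acl frontend_lb src 192.0.2.1
#proxy_protocol_access allow frontend_lb
#proxy_protocol_access deny all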

#  TAG: follow_x_forwarded_for
#	Determine which client proxies can be trusted to provide correct
#	information regarding real client IP address.
#
#	Requests may pass through a chain of several other proxies
#	before reaching us. The original source details may be sent in:
#		* HTTP message Forwarded header, or
#		* HTTP message X-Forwarded-For header, or
#		* PROXY protocol connection header.
#
#	PROXY protocol connections are controlled by the proxy_protocol_access
#	directive which is checked before this.
#
#	If a request reaches us from a source that is allowed by this
#	directive, then we trust the information it provides regarding
#	the IP of the client it received from (if any).
#
#	For the purpose of ACLs used in this directive the src ACL type always
#	matches the address we are testing and srcdomain matches its rDNS.
#
#	On each HTTP request Squid checks for X-Forwarded-For header fields.
#	If found the header values are iterated in reverse order and an allow
#	match is required for Squid to continue on to the next value.
#	The verification ends when a value receives a deny match, cannot be
#	tested, or there are no more values to test.
#	NOTE: Squid does not yet follow the Forwarded HTTP header.
#
#	The end result of this process is an IP address that we will
#	refer to as the indirect client address.  This address may
#	be treated as the client address for access control, ICAP, delay
#	pools and logging, depending on the acl_uses_indirect_client,
#	icap_uses_indirect_client, delay_pool_uses_indirect_client, 
#	log_uses_indirect_client and tproxy_uses_indirect_client options.
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
#	SECURITY CONSIDERATIONS:
#
#		Any host from which we accept client IP details can place
#		incorrect information in the relevant header, and Squid
#		will use the incorrect information as if it were the
#		source address of the request.  This may enable remote
#		hosts to bypass any access control restrictions that are
#		based on the client's source addresses.
#
#	For example:
#
#		acl localhost src 127.0.0.1
#		acl my_other_proxy srcdomain .proxy.example.com
#		follow_x_forwarded_for allow localhost
#		follow_x_forwarded_for allow my_other_proxy
#Default:
# X-Forwarded-For header will be ignored.

#  TAG: acl_uses_indirect_client	on|off
#	Controls whether the indirect client address
#	(see follow_x_forwarded_for) is used instead of the
#	direct client address in acl matching.
#
#	NOTE: the maxconn ACL counts direct TCP links only; indirect
#	      clients always have zero, so they never match.
#Default:
# acl_uses_indirect_client on

#  TAG: delay_pool_uses_indirect_client	on|off
#	Controls whether the indirect client address
#	(see follow_x_forwarded_for) is used instead of the
#	direct client address in delay pools.
#Default:
# delay_pool_uses_indirect_client on

#  TAG: log_uses_indirect_client	on|off
#	Controls whether the indirect client address
#	(see follow_x_forwarded_for) is used instead of the
#	direct client address in the access log.
#Default:
# log_uses_indirect_client on

#  TAG: tproxy_uses_indirect_client	on|off
#	Controls whether the indirect client address
#	(see follow_x_forwarded_for) is used instead of the
#	direct client address when spoofing the outgoing client.
#
#	This has no effect on requests arriving in non-tproxy
#	mode ports.
#
#	SECURITY WARNING: Usage of this option is dangerous
#	and should not be used trivially. Correct configuration
#	of follow_x_forwarded_for with a limited set of trusted
#	sources is required to prevent abuse of your proxy.
#Default:
# tproxy_uses_indirect_client off

#  TAG: spoof_client_ip
#	Control client IP address spoofing of TPROXY traffic based on
#	defined access lists.
#
#	spoof_client_ip allow|deny [!]aclname ...
#
#	If there are no "spoof_client_ip" lines present, the default
#	is to "allow" spoofing of any suitable request.
#
#	Note that the cache_peer "no-tproxy" option overrides this ACL.
#
#	This clause supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# Allow spoofing on all TPROXY traffic.
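
# Example sketch: disable spoofing for one internal range while keeping
# the default for everything else (the range is illustrative):
#
#acl no_spoof src 10.0.0.0/8
#spoof_client_ip deny no_spoof
#spoof_client_ip allow all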

#  TAG: http_access
#	Allowing or Denying access based on defined access lists
#
#	To allow or deny a message received on an HTTP, HTTPS, or FTP port:
#	http_access allow|deny [!]aclname ...
#
#	NOTE on default values:
#
#	If there are no "access" lines present, the default is to deny
#	the request.
#
#	If none of the "access" lines cause a match, the default is the
#	opposite of the last line in the list.  If the last line was
#	deny, the default is allow.  Conversely, if the last line
#	is allow, the default will be deny.  For these reasons, it is a
#	good idea to have a "deny all" entry at the end of your access
#	lists to avoid potential confusion.
#
#	This clause supports both fast and slow acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
#Default:
# Deny, unless rules exist in squid.conf.
#

#
# Recommended minimum Access Permission configuration:
#
# Deny requests to certain unsafe ports
#http_access deny !Safe_ports

# Deny CONNECT to other than secure SSL ports
#http_access deny CONNECT !SSL_ports

# Only allow cachemgr access from localhost
#http_access allow localhost manager
#http_access deny manager

# We strongly recommend the following be uncommented to protect innocent
# web applications running on the proxy server that think the only
# one who can access services on "localhost" is a local user.
#http_access deny to_localhost

#
# INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS
#

# Example rule allowing access from your local networks.
# Adapt localnet in the ACL section to list your (internal) IP networks
# from where browsing should be allowed
#http_access allow localnet
#http_access allow localhost

# And finally deny all other access to this proxy
#http_access deny all

http_access allow all

#  TAG: adapted_http_access
#	Allowing or Denying access based on defined access lists
#
#	Essentially identical to http_access, but runs after redirectors
#	and ICAP/eCAP adaptation, allowing access control based on their
#	output.
#
#	If not set then only http_access is used.
#Default:
# Allow, unless rules exist in squid.conf.

#  TAG: http_reply_access
#	Allow replies to client requests. This is complementary to http_access.
#
#	http_reply_access allow|deny [!] aclname ...
#
#	NOTE: if there are no access lines present, the default is to allow
#	all replies.
#
#	If none of the access lines cause a match the opposite of the
#	last line will apply. Thus it is good practice to end the rules
#	with an "allow all" or "deny all" entry.
#
#	This clause supports both fast and slow acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# Allow, unless rules exist in squid.conf.

#  TAG: icp_access
#	Allowing or Denying access to the ICP port based on defined
#	access lists
#
#	icp_access  allow|deny [!]aclname ...
#
#	NOTE: The default if no icp_access lines are present is to
#	deny all traffic. This default may cause problems with peers
#	using ICP.
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
## Allow ICP queries from local networks only
##icp_access allow localnet
##icp_access deny all
#Default:
# Deny, unless rules exist in squid.conf.

#  TAG: htcp_access
#	Allowing or Denying access to the HTCP port based on defined
#	access lists
#
#	htcp_access  allow|deny [!]aclname ...
#
#	See also htcp_clr_access for details on access control for
#	cache purge (CLR) HTCP messages.
#
#	NOTE: The default if no htcp_access lines are present is to
#	deny all traffic. This default may cause problems with peers
#	using the htcp option.
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
## Allow HTCP queries from local networks only
##htcp_access allow localnet
##htcp_access deny all
#Default:
# Deny, unless rules exist in squid.conf.

#  TAG: htcp_clr_access
#	Allowing or Denying access to purge content using HTCP based
#	on defined access lists.
#	See htcp_access for details on general HTCP access control.
#
#	htcp_clr_access  allow|deny [!]aclname ...
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
## Allow HTCP CLR requests from trusted peers
#acl htcp_clr_peer src 192.0.2.2 2001:DB8::2
#htcp_clr_access allow htcp_clr_peer
#htcp_clr_access deny all
#Default:
# Deny, unless rules exist in squid.conf.

#  TAG: miss_access
#	Determines whether network access is permitted when satisfying a request.
#
#	For example:
#	    to force your neighbors to use you as a sibling instead of
#	    a parent.
#
#		acl localclients src 192.0.2.0/24 2001:DB8::a:0/64
#		miss_access deny  !localclients
#		miss_access allow all
#
#	This means only your local clients are allowed to fetch relayed/MISS
#	replies from the network and all other clients can only fetch cached
#	objects (HITs).
#
#	The default for this setting allows all clients who passed the
#	http_access rules to relay via this proxy.
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# Allow, unless rules exist in squid.conf.

#  TAG: ident_lookup_access
#	A list of ACL elements which, if matched, cause an ident
#	(RFC 931) lookup to be performed for this request.  For
#	example, you might choose to always perform ident lookups
#	for your main multi-user Unix boxes, but not for your Macs
#	and PCs.  By default, ident lookups are not performed for
#	any requests.
#
#	To enable ident lookups for specific client addresses, you
#	can follow this example:
#
#	acl ident_aware_hosts src 192.168.1.0/24
#	ident_lookup_access allow ident_aware_hosts
#	ident_lookup_access deny all
#
#	Only src type ACL checks are fully supported.  A srcdomain
#	ACL might work at times, but it will not always provide
#	the correct result.
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# Unless rules exist in squid.conf, IDENT is not fetched.

#  TAG: reply_body_max_size	size [acl acl...]
#	This option specifies the maximum size of a reply body. It can be
#	used to prevent users from downloading very large files, such as
#	MP3's and movies. When the reply headers are received, the
#	reply_body_max_size lines are processed, and the first line where
#	all (if any) listed ACLs are true is used as the maximum body size
#	for this reply.
#
#	This size is checked twice. First when we get the reply headers,
#	we check the content-length value.  If the content length value exists
#	and is larger than the allowed size, the request is denied and the
#	user receives an error message that says "the request or reply
#	is too large." If there is no content-length, and the reply
#	size exceeds this limit, the client's connection is just closed
#	and they will receive a partial reply.
#
#	WARNING: downstream caches probably can not detect a partial reply
#	if there is no content-length header, so they will cache
#	partial responses and give them out as hits.  You should NOT
#	use this option if you have downstream caches.
#
#	WARNING: A maximum size smaller than the size of squid's error messages
#	will cause an infinite loop and crash squid. Ensure that the smallest
#	non-zero value you use is greater than the maximum header size plus
#	the size of your largest error page.
#
#	If you set this parameter none (the default), there will be
#	no limit imposed.
#
#	Configuration Format is:
#		reply_body_max_size SIZE UNITS [acl ...]
#	ie.
#		reply_body_max_size 10 MB
#
#Default:
# No limit is applied.
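
# Example sketch: cap media downloads while leaving other replies
# unlimited (the ACL pattern is illustrative). Lines are checked in
# order; the first line whose ACLs all match sets the limit.
#
#acl media urlpath_regex -i \.(mp3|mp4|avi)$
#reply_body_max_size 50 MB media
#reply_body_max_size none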

# NETWORK OPTIONS
# -----------------------------------------------------------------------------

#  TAG: http_port
#	Usage:	port [mode] [options]
#		hostname:port [mode] [options]
#		1.2.3.4:port [mode] [options]
#
#	The socket addresses where Squid will listen for HTTP client
#	requests.  You may specify multiple socket addresses.
#	There are three forms: port alone, hostname with port, and
#	IP address with port.  If you specify a hostname or IP
#	address, Squid binds the socket to that specific
#	address. Most likely, you do not need to bind to a specific
#	address, so you can use the port number alone.
#
#	If you are running Squid in accelerator mode, you
#	probably want to listen on port 80 also, or instead.
#
#	The -a command line option may be used to specify additional
#	port(s) where Squid listens for proxy requests. Such ports will
#	be plain proxy ports with no options.
#
#	You may specify multiple socket addresses on multiple lines.
#
#	Modes:
#
#	   intercept	Support for IP-Layer NAT interception delivering
#			traffic to this Squid port.
#			NP: disables authentication on the port.
#
#	   tproxy	Support Linux TPROXY (or BSD divert-to) with spoofing
#			of outgoing connections using the client IP address.
#			NP: disables authentication on the port.
#
#	   accel	Accelerator / reverse proxy mode
#
#	   ssl-bump	For each CONNECT request allowed by ssl_bump ACLs,
#			establish secure connection with the client and with
#			the server, decrypt HTTPS messages as they pass through
#			Squid, and treat them as unencrypted HTTP messages,
#			becoming the man-in-the-middle.
#
#			The ssl_bump option is required to fully enable
#			bumping of CONNECT requests.
#
#	Omitting the mode flag causes default forward proxy mode to be used.
#
#
#	Accelerator Mode Options:
#
#	   defaultsite=domainname
#			What to use for the Host: header if it is not present
#			in a request. Determines what site (not origin server)
#			accelerators should consider the default.
#
#	   no-vhost	Disable using HTTP/1.1 Host header for virtual domain support.
#
#	   protocol=	Protocol to reconstruct accelerated and intercepted
#			requests with. Defaults to HTTP/1.1 for http_port and
#			HTTPS/1.1 for https_port.
#			When an unsupported value is configured Squid will
#			produce a FATAL error.
#			Values: HTTP or HTTP/1.1, HTTPS or HTTPS/1.1
#
#	   vport	Virtual host port support. Using the http_port number
#			instead of the port passed on Host: headers.
#
#	   vport=NN	Virtual host port support. Using the specified port
#			number instead of the port passed on Host: headers.
#
#	   act-as-origin
#			Act as if this Squid is the origin server.
#			This currently means generate new Date: and Expires:
#			headers on HIT instead of adding Age:.
#
#	   ignore-cc	Ignore request Cache-Control headers.
#
#			WARNING: This option violates HTTP specifications if
#			used in non-accelerator setups.
#
#	   allow-direct	Allow direct forwarding in accelerator mode. Normally
#			accelerated requests are denied direct forwarding as if
#			never_direct was used.
#
#			WARNING: this option opens accelerator mode to security
#			vulnerabilities usually only affecting interception
#			mode. Make sure to protect forwarding with suitable
#			http_access rules when using this.
#
#
#	SSL Bump Mode Options:
#	    In addition to these options ssl-bump requires TLS/SSL options.
#
#	   generate-host-certificates[=<on|off>]
#			Dynamically create SSL server certificates for the
#			destination hosts of bumped CONNECT requests. When
#			enabled, the cert and key options are used to sign
#			generated certificates. Otherwise the generated
#			certificate will be self-signed.
#			If there is a CA certificate, the lifetime of the
#			generated certificate equals the lifetime of the CA
#			certificate. If the generated certificate is
#			self-signed, the lifetime is three years.
#			This option is enabled by default when ssl-bump is used.
#			See the ssl-bump option above for more information.
#			
#	   dynamic_cert_mem_cache_size=SIZE
#			Approximate total RAM size spent on cached generated
#			certificates. If set to zero, caching is disabled. The
#			default value is 4MB.
#
#	TLS / SSL Options:
#
#	   cert=	Path to SSL certificate (PEM format).
#
#	   key=		Path to SSL private key file (PEM format)
#			if not specified, the certificate file is
#			assumed to be a combined certificate and
#			key file.
#
#	   version=	The version of SSL/TLS supported
#			    1	automatic (default)
#			    2	SSLv2 only
#			    3	SSLv3 only
#			    4	TLSv1.0 only
#			    5	TLSv1.1 only
#			    6	TLSv1.2 only
#
#	   cipher=	Colon separated list of supported ciphers.
#			NOTE: some ciphers such as EDH ciphers depend on
#			      additional settings. If those settings are
#			      omitted the ciphers may be silently ignored
#			      by the OpenSSL library.
#
#	   options=	Various SSL implementation options. The most important
#			being:
#			    NO_SSLv2    Disallow the use of SSLv2
#			    NO_SSLv3    Disallow the use of SSLv3
#			    NO_TLSv1    Disallow the use of TLSv1.0
#			    NO_TLSv1_1  Disallow the use of TLSv1.1
#			    NO_TLSv1_2  Disallow the use of TLSv1.2
#			    SINGLE_DH_USE Always create a new key when using
#				      temporary/ephemeral DH key exchanges
#			    NO_TICKET Disables TLS tickets extension
#			    ALL       Enable various bug workarounds
#				      suggested as "harmless" by OpenSSL
#				      Be warned that this reduces SSL/TLS
#				      strength to some attacks.
#			See OpenSSL SSL_CTX_set_options documentation for a
#			complete list of options.
#
#	   clientca=	File containing the list of CAs to use when
#			requesting a client certificate.
#
#	   cafile=	File containing additional CA certificates to
#			use when verifying client certificates. If unset
#			clientca will be used.
#
#	   capath=	Directory containing additional CA certificates
#			and CRL lists to use when verifying client certificates.
#
#	   crlfile=	File of additional CRL lists to use when verifying
#			the client certificate, in addition to CRLs stored in
#			the capath. Implies VERIFY_CRL flag below.
#
#	   dhparams=	File containing DH parameters for temporary/ephemeral
#			DH key exchanges. See OpenSSL documentation for details
#			on how to create this file.
#			WARNING: EDH ciphers will be silently disabled if this
#				 option is not set.
#
#	   sslflags=	Various flags modifying the use of SSL:
#			    DELAYED_AUTH
#				Don't request client certificates
#				immediately, but wait until acl processing
#				requires a certificate (not yet implemented).
#			    NO_DEFAULT_CA
#				Don't use the default CA lists built in
#				to OpenSSL.
#			    NO_SESSION_REUSE
#				Don't allow for session reuse. Each connection
#				will result in a new SSL session.
#			    VERIFY_CRL
#				Verify CRL lists when accepting client
#				certificates.
#			    VERIFY_CRL_ALL
#				Verify CRL lists for all certificates in the
#				client certificate chain.
#
#	   sslcontext=	SSL session ID context identifier.
#
#	Other Options:
#
#	   connection-auth[=on|off]
#	                use connection-auth=off to tell Squid to prevent 
#	                forwarding Microsoft connection oriented authentication
#			(NTLM, Negotiate and Kerberos)
#
#	   disable-pmtu-discovery=
#			Control Path-MTU discovery usage:
#			    off		lets OS decide on what to do (default).
#			    transparent	disable PMTU discovery when transparent
#					support is enabled.
#			    always	always disable PMTU discovery.
#
#			In many setups of transparently intercepting proxies
#			Path-MTU discovery can not work on traffic towards the
#			clients. This is the case when the intercepting device
#			does not fully track connections and fails to forward
#			ICMP must fragment messages to the cache server. If you
#			have such a setup and certain clients sporadically hang
#			or never complete requests, set the
#			disable-pmtu-discovery option to 'transparent'.
#
#	   name=	Specifies an internal name for the port. Defaults to
#			the port specification (port or addr:port)
#
#	   tcpkeepalive[=idle,interval,timeout]
#			Enable TCP keepalive probes of idle connections.
#			In seconds; idle is the initial time before TCP starts
#			probing the connection, interval how often to probe, and
#			timeout the time before giving up.
#
#	   require-proxy-header
#			Require PROXY protocol version 1 or 2 connections.
#			The proxy_protocol_access is required to whitelist
#			downstream proxies which can be trusted.
#
#	If you run Squid on a dual-homed machine with an internal
#	and an external interface we recommend you to specify the
#	internal address:port in http_port. This way Squid will only be
#	visible on the internal address.
#
#

# Squid normally listens to port 3128
http_port 3128
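
# Further sketches (commented out; hostnames and ports are assumptions):
#
## Reverse proxy / accelerator for a single site:
#http_port 80 accel defaultsite=www.example.com no-vhost
#
## NAT interception port (requires matching firewall redirect rules):
#http_port 3129 intercept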

#  TAG: https_port
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl option.
#
#	Usage:  [ip:]port cert=certificate.pem [key=key.pem] [mode] [options...]
#
#	The socket address where Squid will listen for client requests made
#	over TLS or SSL connections. Commonly referred to as HTTPS.
#
#	This is most useful for situations where you are running squid in
#	accelerator mode and you want to do the SSL work at the accelerator level.
#
#	You may specify multiple socket addresses on multiple lines,
#	each with their own SSL certificate and/or options.
#
#	Modes:
#
#	   accel	Accelerator / reverse proxy mode
#
#	   intercept	Support for IP-Layer interception of
#			outgoing requests without browser settings.
#			NP: disables authentication and IPv6 on the port.
#
#	   tproxy	Support Linux TPROXY for spoofing outgoing
#			connections using the client IP address.
#			NP: disables authentication and maybe IPv6 on the port.
#
#	   ssl-bump	For each intercepted connection allowed by ssl_bump
#			ACLs, establish a secure connection with the client and with
#			the server, decrypt HTTPS messages as they pass through
#			Squid, and treat them as unencrypted HTTP messages,
#			becoming the man-in-the-middle.
#
#			An "ssl_bump server-first" match is required to
#			fully enable bumping of intercepted SSL connections.
#
#			Requires tproxy or intercept.
#
#	Omitting the mode flag causes default forward proxy mode to be used.
#
#
#	See http_port for a list of generic options
#
#
#	SSL Options:
#
#	   cert=	Path to SSL certificate (PEM format).
#
#	   key=		Path to SSL private key file (PEM format)
#			if not specified, the certificate file is
#			assumed to be a combined certificate and
#			key file.
#
#	   version=	The version of SSL/TLS supported
#			    1	automatic (default)
#			    2	SSLv2 only
#			    3	SSLv3 only
#			    4	TLSv1 only
#
#	   cipher=	Colon separated list of supported ciphers.
#
#	   options=	Various SSL engine options. The most important
#			being:
#			    NO_SSLv2  Disallow the use of SSLv2
#			    NO_SSLv3  Disallow the use of SSLv3
#			    NO_TLSv1  Disallow the use of TLSv1
#			    SINGLE_DH_USE Always create a new key when using
#				      temporary/ephemeral DH key exchanges
#			See src/ssl_support.c or OpenSSL SSL_CTX_set_options
#			documentation for a complete list of options.
#
#	   clientca=	File containing the list of CAs to use when
#			requesting a client certificate.
#
#	   cafile=	File containing additional CA certificates to
#			use when verifying client certificates. If unset
#			clientca will be used.
#
#	   capath=	Directory containing additional CA certificates
#			and CRL lists to use when verifying client certificates.
#
#	   crlfile=	File of additional CRL lists to use when verifying
#			the client certificate, in addition to CRLs stored in
#			the capath. Implies VERIFY_CRL flag below.
#
#	   dhparams=	File containing DH parameters for temporary/ephemeral
#			DH key exchanges.
#
#	   sslflags=	Various flags modifying the use of SSL:
#			    DELAYED_AUTH
#				Don't request client certificates
#				immediately, but wait until acl processing
#				requires a certificate (not yet implemented).
#			    NO_DEFAULT_CA
#				Don't use the default CA lists built in
#				to OpenSSL.
#			    NO_SESSION_REUSE
#				Don't allow for session reuse. Each connection
#				will result in a new SSL session.
#			    VERIFY_CRL
#				Verify CRL lists when accepting client
#				certificates.
#			    VERIFY_CRL_ALL
#				Verify CRL lists for all certificates in the
#				client certificate chain.
#
#	   sslcontext=	SSL session ID context identifier.
#
#	   generate-host-certificates[=<on|off>]
#			Dynamically create SSL server certificates for the
#			destination hosts of bumped SSL requests. When
#			enabled, the cert and key options are used to sign
#			generated certificates. Otherwise the generated
#			certificate will be self-signed.
#			If there is a CA certificate, the lifetime of a
#			generated certificate equals the lifetime of the CA
#			certificate. If the generated certificate is
#			self-signed, the lifetime is three years.
#			This option is enabled by default when ssl-bump is used.
#			See the ssl-bump mode flag above for more information.
#
#	   dynamic_cert_mem_cache_size=SIZE
#			Approximate total RAM size spent on cached generated
#			certificates. If set to zero, caching is disabled. The
#			default value is 4MB.
#
#	See http_port for a list of available options.
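#
#	An illustrative accelerator-mode sketch (the certificate paths are
#	hypothetical):
#
#		https_port 443 accel cert=/etc/squid/example.pem key=/etc/squid/example.key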
#Default:
# none

#  TAG: ftp_port
#	Enables Native FTP proxy by specifying the socket address where Squid
#	listens for FTP client requests. See http_port directive for various
#	ways to specify the listening address and mode.
#
#	Usage: ftp_port address [mode] [options]
#
#	WARNING: This is a new, experimental, complex feature that has seen
#	limited production exposure. Some Squid modules (e.g., caching) do not
#	currently work with native FTP proxying, and many features have not
#	even been tested for compatibility. Test well before deploying!
#
#	Native FTP proxying differs substantially from proxying HTTP requests
#	with ftp:// URIs because Squid works as an FTP server and receives
#	actual FTP commands (rather than HTTP requests with FTP URLs).
#
#	Native FTP commands accepted at ftp_port are internally converted or
#	wrapped into HTTP-like messages. The same happens to Native FTP
#	responses received from FTP origin servers. Those HTTP-like messages
#	are shoveled through regular access control and adaptation layers
#	between the FTP client and the FTP origin server. This allows Squid to
#	examine, adapt, block, and log FTP exchanges. Squid reuses most HTTP
#	mechanisms when shoveling wrapped FTP messages. For example,
#	http_access and adaptation_access directives are used.
#
#	Modes:
#
#	   intercept	Same as http_port intercept. The FTP origin address is
#			determined based on the intended destination of the
#			intercepted connection.
#
#	   tproxy	Support Linux TPROXY for spoofing outgoing
#			connections using the client IP address.
#			NP: disables authentication and maybe IPv6 on the port.
#
#	By default (i.e., without an explicit mode option), Squid extracts the
#	FTP origin address from the login@origin parameter of the FTP USER
#	command. Many popular FTP clients support such native FTP proxying.
#
#	Options:
#
#	   name=token	Specifies an internal name for the port. Defaults to
#			the port address. Usable with myportname ACL.
#
#	   ftp-track-dirs
#			Enables tracking of FTP directories by injecting extra
#			PWD commands and adjusting Request-URI (in wrapping
#			HTTP requests) to reflect the current FTP server
#			directory. Tracking is disabled by default.
#
#	   protocol=FTP	Protocol to reconstruct accelerated and intercepted
#			requests with. Defaults to FTP. No other accepted
#			value has been tested. An unsupported value
#			results in a FATAL error. Accepted values are FTP,
#			HTTP (or HTTP/1.1), and HTTPS (or HTTPS/1.1).
#
#	Other http_port modes and options that are not specific to HTTP and
#	HTTPS may also work.
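#
#	An illustrative sketch (the port number is arbitrary):
#
#		ftp_port 2121 ftp-track-dirs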
#Default:
# none

#  TAG: tcp_outgoing_tos
#	Allows you to select a TOS/Diffserv value for packets outgoing
#	on the server side, based on an ACL.
#
#	tcp_outgoing_tos ds-field [!]aclname ...
#
#	Example where normal_service_net uses the TOS value 0x00
#	and good_service_net uses 0x20
#
#	acl normal_service_net src 10.0.0.0/24
#	acl good_service_net src 10.0.1.0/24
#	tcp_outgoing_tos 0x00 normal_service_net
#	tcp_outgoing_tos 0x20 good_service_net
#
#	TOS/DSCP values really only have local significance - so you should
#	know what you're specifying. For more information, see RFC2474,
#	RFC2475, and RFC3260.
#
#	The TOS/DSCP byte must be exactly that - an octet value 0 - 255, or
#	"default" to use whatever default your host has.
#	Note that only multiples of 4 are usable as the two rightmost bits have
#	been redefined for use by ECN (RFC 3168 section 23.1).
#	The squid parser will enforce this by masking away the ECN bits.
#
#	Processing proceeds in the order specified, and stops at first fully
#	matching line.
#
#	Only fast ACLs are supported.
#Default:
# none

#  TAG: clientside_tos
#	Allows you to select a TOS/DSCP value for packets being transmitted
#	on the client-side, based on an ACL.
#
#	clientside_tos ds-field [!]aclname ...
#
#	Example where normal_service_net uses the TOS value 0x00
#	and good_service_net uses 0x20
#
#	acl normal_service_net src 10.0.0.0/24
#	acl good_service_net src 10.0.1.0/24
#	clientside_tos 0x00 normal_service_net
#	clientside_tos 0x20 good_service_net
#
#	Note: This feature is incompatible with qos_flows. Any TOS values set here
#	will be overwritten by TOS values in qos_flows.
#
#	The TOS/DSCP byte must be exactly that - an octet value 0 - 255, or
#	"default" to use whatever default your host has.
#	Note that only multiples of 4 are usable as the two rightmost bits have
#	been redefined for use by ECN (RFC 3168 section 23.1).
#	The squid parser will enforce this by masking away the ECN bits.
#
#Default:
# none

#  TAG: tcp_outgoing_mark
# Note: This option is only available if Squid is rebuilt with the
#       Packet MARK (Linux)
#
#	Allows you to apply a Netfilter mark value to outgoing packets
#	on the server side, based on an ACL.
#
#	tcp_outgoing_mark mark-value [!]aclname ...
#
#	Example where normal_service_net uses the mark value 0x00
#	and good_service_net uses 0x20
#
#	acl normal_service_net src 10.0.0.0/24
#	acl good_service_net src 10.0.1.0/24
#	tcp_outgoing_mark 0x00 normal_service_net
#	tcp_outgoing_mark 0x20 good_service_net
#
#	Only fast ACLs are supported.
#Default:
# none

#  TAG: clientside_mark
# Note: This option is only available if Squid is rebuilt with the
#       Packet MARK (Linux)
#
#	Allows you to apply a Netfilter mark value to packets being transmitted
#	on the client-side, based on an ACL.
#
#	clientside_mark mark-value [!]aclname ...
#
#	Example where normal_service_net uses the mark value 0x00
#	and good_service_net uses 0x20
#
#	acl normal_service_net src 10.0.0.0/24
#	acl good_service_net src 10.0.1.0/24
#	clientside_mark 0x00 normal_service_net
#	clientside_mark 0x20 good_service_net
#
#	Note: This feature is incompatible with qos_flows. Any mark values set here
#	will be overwritten by mark values in qos_flows.
#Default:
# none

#  TAG: qos_flows
#	Allows you to select a TOS/DSCP value to mark outgoing
#	connections to the client, based on where the reply was sourced.
#	For platforms using netfilter, allows you to set a netfilter mark
#	value instead of, or in addition to, a TOS value.
#
#	By default this functionality is disabled. To enable it with the default
#	settings simply use "qos_flows mark" or "qos_flows tos". Default
#	settings will result in the netfilter mark or TOS value being copied
#	from the upstream connection to the client. Note that it is the connection
#	CONNMARK value not the packet MARK value that is copied.
#
#	It is not currently possible to copy the mark or TOS value from the
#	client to the upstream connection request.
#
#	TOS values really only have local significance - so you should
#	know what you're specifying. For more information, see RFC2474,
#	RFC2475, and RFC3260.
#
#	The TOS/DSCP byte must be exactly that - an octet value 0 - 255.
#	Note that only multiples of 4 are usable as the two rightmost bits have
#	been redefined for use by ECN (RFC 3168 section 23.1).
#	The squid parser will enforce this by masking away the ECN bits.
#
#	Mark values can be any unsigned 32-bit integer value.
#
#	This setting is configured with the following values:
#
#	tos|mark                Whether to set TOS or netfilter mark values
#
#	local-hit=0xFF		Value to mark local cache hits.
#
#	sibling-hit=0xFF	Value to mark hits from sibling peers.
#
#	parent-hit=0xFF		Value to mark hits from parent peers.
#
#	miss=0xFF[/mask]	Value to mark cache misses. Takes precedence
#				over the preserve-miss feature (see below), unless
#				mask is specified, in which case only the bits
#				specified in the mask are written.
#
#	The TOS variant of the following features are only possible on Linux
#	and require your kernel to be patched with the TOS preserving ZPH
#	patch, available from http://zph.bratcheda.org
#	No patch is needed to preserve the netfilter mark, which will work
#	with all variants of netfilter.
#
#	disable-preserve-miss
#		This option disables the preservation of the TOS or netfilter
#		mark. By default, the existing TOS or netfilter mark value of
#		the response coming from the remote server will be retained
#		and masked with miss-mask.
#		NOTE: in the case of a netfilter mark, the mark must be set on
#		the connection (using the CONNMARK target) not on the packet
#		(MARK target).
#
#	miss-mask=0xFF
#		Allows you to mask certain bits in the TOS or mark value
#		received from the remote server, before copying the value to
#		the TOS sent towards clients.
#		Default for tos: 0xFF (TOS from server is not changed).
#		Default for mark: 0xFFFFFFFF (mark from server is not changed).
#
#	All of these features require the --enable-zph-qos compilation flag
#	(enabled by default). Netfilter marking also requires the
#	libnetfilter_conntrack libraries (--with-netfilter-conntrack) and
#	libcap 2.09+ (--with-libcap).
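#
#	An illustrative sketch (the TOS values are hypothetical):
#
#		qos_flows tos local-hit=0x30 sibling-hit=0x20 parent-hit=0x10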
#
#Default:
# none

#  TAG: tcp_outgoing_address
#	Allows you to map requests to different outgoing IP addresses
#	based on the username or source address of the user making
#	the request.
#
#	tcp_outgoing_address ipaddr [[!]aclname] ...
#
#	For example;
#		Forwarding clients with dedicated IPs for certain subnets.
#
#	  acl normal_service_net src 10.0.0.0/24
#	  acl good_service_net src 10.0.2.0/24
#
#	  tcp_outgoing_address 2001:db8::c001 good_service_net
#	  tcp_outgoing_address 10.1.0.2 good_service_net
#
#	  tcp_outgoing_address 2001:db8::beef normal_service_net
#	  tcp_outgoing_address 10.1.0.1 normal_service_net
#
#	  tcp_outgoing_address 2001:db8::1
#	  tcp_outgoing_address 10.1.0.3
#
#	Processing proceeds in the order specified, and stops at first fully
#	matching line.
#
#	Squid will add an implicit IP version test to each line.
#	Requests going to IPv4 websites will use the outgoing 10.1.0.* addresses.
#	Requests going to IPv6 websites will use the outgoing 2001:db8:* addresses.
#
#
#	NOTE: The use of this directive using client dependent ACLs is
#	incompatible with the use of server side persistent connections. To
#	ensure correct results it is best to set server_persistent_connections
#	to off when using this directive in such configurations.
#
#	NOTE: The use of this directive to set a local IP on outgoing TCP links
#	is incompatible with using TPROXY to set the client IP on outbound TCP links.
#	When needing to contact peers, use the no-tproxy cache_peer option and the
#	client_dst_passthru directive to re-enable normal forwarding.
#
#Default:
# Address selection is performed by the operating system.

#  TAG: host_verify_strict
#	Regardless of this option setting, when dealing with intercepted
#	traffic, Squid always verifies that the destination IP address matches
#	the Host header domain or IP (called 'authority form URL').
#	
#	This enforcement is performed to satisfy a MUST-level requirement in
#	RFC 2616 section 14.23: "The Host field value MUST represent the naming
#	authority of the origin server or gateway given by the original URL".
#	
#	When set to ON:
#		Squid always responds with an HTTP 409 (Conflict) error
#		page and logs a security warning if there is no match.
#	
#		Squid verifies that the destination IP address matches
#		the Host header for forward-proxy and reverse-proxy traffic
#		as well. For those traffic types, Squid also enables the
#		following checks, comparing the corresponding Host header
#		and Request-URI components:
#	
#		 * The host names (domain or IP) must be identical,
#		   but a valueless or missing Host header disables all checks.
#		   For the two host names to match, both must be either IP
#		   or FQDN.
#	
#		 * Port numbers must be identical, but if a port is missing
#		   the scheme-default port is assumed.
#	
#	
#	When set to OFF (the default):
#		Squid allows suspicious requests to continue but logs a
#		security warning and blocks caching of the response.
#	
#		 * Forward-proxy traffic is not checked at all.
#	
#		 * Reverse-proxy traffic is not checked at all.
#	
#		 * Intercepted traffic which passes verification is handled
#		   according to client_dst_passthru.
#	
#		 * Intercepted requests which fail verification are sent
#		   to the client's original destination instead of DIRECT.
#		   This overrides 'client_dst_passthru off'.
#	
#		For now, suspicious intercepted CONNECT requests are always
#		responded to with an HTTP 409 (Conflict) error page.
#	
#	
#	SECURITY NOTE:
#	
#	As described in CVE-2009-0801 when the Host: header alone is used
#	to determine the destination of a request it becomes trivial for
#	malicious scripts on remote websites to bypass browser same-origin
#	security policy and sandboxing protections.
#	
#	The cause of this is that such applets are allowed to implement
#	their own HTTP stack, in which case the same-origin policy of the
#	browser sandbox only verifies, at the IP level, that the applet
#	contacts the same IP it was loaded from. The Host: header may
#	be different from the connected IP and approved origin.
#	
#Default:
# host_verify_strict off

#  TAG: client_dst_passthru
#	With NAT or TPROXY intercepted traffic Squid may pass the request
#	directly to the original client destination IP or seek a faster
#	source using the HTTP Host header.
#	
#	Using Host to locate alternative servers can provide faster
#	connectivity with a range of failure recovery options,
#	but can also lead to connectivity trouble when the client and
#	server are attempting stateful interactions unaware of the proxy.
#	
#	This option (on by default) prevents alternative DNS entries being
#	located to send intercepted traffic DIRECT to an origin server.
#	The client's original destination IP and port will be used instead.
#	
#	Regardless of this option setting, when dealing with intercepted
#	traffic Squid will verify the Host: header and any traffic which
#	fails Host verification will be treated as if this option were ON.
#	
#	see host_verify_strict for details on the verification process.
#Default:
# client_dst_passthru on

# SSL OPTIONS
# -----------------------------------------------------------------------------

#  TAG: ssl_unclean_shutdown
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Some browsers (especially MSIE) bug out on SSL shutdown
#	messages.
#Default:
# ssl_unclean_shutdown off

#  TAG: ssl_engine
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	The OpenSSL engine to use. You will need to set this if you
#	would like to use hardware SSL acceleration for example.
#Default:
# none

#  TAG: sslproxy_client_certificate
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Client SSL Certificate to use when proxying https:// URLs
#Default:
# none

#  TAG: sslproxy_client_key
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Client SSL Key to use when proxying https:// URLs
#Default:
# none

#  TAG: sslproxy_version
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	SSL version level to use when proxying https:// URLs
#
#	The versions of SSL/TLS supported:
#
#	    1	automatic (default)
#	    2	SSLv2 only
#	    3	SSLv3 only
#	    4	TLSv1.0 only
#	    5	TLSv1.1 only
#	    6	TLSv1.2 only
#Default:
# automatic SSL/TLS version negotiation

#  TAG: sslproxy_options
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Colon (:) or comma (,) separated list of SSL implementation options
#	to use when proxying https:// URLs
#	
#	The most important being:
#
#	    NO_SSLv2    Disallow the use of SSLv2
#	    NO_SSLv3    Disallow the use of SSLv3
#	    NO_TLSv1    Disallow the use of TLSv1.0
#	    NO_TLSv1_1  Disallow the use of TLSv1.1
#	    NO_TLSv1_2  Disallow the use of TLSv1.2
#	    SINGLE_DH_USE
#		      Always create a new key when using temporary/ephemeral
#		      DH key exchanges
#	    SSL_OP_NO_TICKET
#		      Disable use of RFC5077 session tickets. Some servers
#		      may have problems understanding the TLS extension due
#		      to ambiguous specification in RFC4507.
#	    ALL       Enable various bug workarounds suggested as "harmless"
#		      by OpenSSL. Be warned that this may reduce SSL/TLS
#		      strength to some attacks.
#	
#	See the OpenSSL SSL_CTX_set_options documentation for a
#	complete list of possible options.
#	
#	WARNING: This directive takes a single token. If a space is used
#		 the value(s) after that space are SILENTLY IGNORED.
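#
#	For example, to disable the legacy SSL versions when proxying
#	https:// URLs (a minimal sketch):
#
#		sslproxy_options NO_SSLv2,NO_SSLv3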
#Default:
# none

#  TAG: sslproxy_cipher
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	SSL cipher list to use when proxying https:// URLs
#
#	Colon separated list of supported ciphers.
#Default:
# none

#  TAG: sslproxy_cafile
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	File containing CA certificates to use when verifying server
#	certificates while proxying https:// URLs
#Default:
# none

#  TAG: sslproxy_capath
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Directory containing CA certificates to use when verifying
#	server certificates while proxying https:// URLs
#Default:
# none

#  TAG: sslproxy_session_ttl
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Sets the timeout value for SSL sessions
#Default:
# sslproxy_session_ttl 300

#  TAG: sslproxy_session_cache_size
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Sets the cache size to use for SSL sessions
#Default:
# sslproxy_session_cache_size 2 MB

#  TAG: sslproxy_cert_sign_hash
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Sets the hashing algorithm to use when signing generated certificates.
#	Valid algorithm names depend on the OpenSSL library used. The following
#	names are usually available: sha1, sha256, sha512, and md5. Please see
#	your OpenSSL library manual for the available hashes. By default, Squids
#	that support this option use sha256 hashes.
#
#	Squid does not forcefully purge cached certificates that were generated
#	with an algorithm other than the currently configured one. They remain
#	in the cache, subject to the regular cache eviction policy, and become
#	useful if the algorithm changes again.
#Default:
# none

#  TAG: ssl_bump
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	This option is consulted when a CONNECT request is received on
#	an http_port (or a new connection is intercepted at an
#	https_port), provided that port was configured with an ssl-bump
#	flag. The subsequent data on the connection is either treated as
#	HTTPS and decrypted OR tunneled at TCP level without decryption,
#	depending on the first matching bumping "action".
#
#	ssl_bump <action> [!]acl ...
#
#	The following bumping actions are currently supported:
#
#	    splice
#		Become a TCP tunnel without decrypting proxied traffic.
#		This is the default action.
#
#	    bump
#		Establish a secure connection with the server and, using a
#		mimicked server certificate, with the client.
#
#	    peek
#		Receive client (step SslBump1) or server (step SslBump2)
#		certificate while preserving the possibility of splicing the
#		connection. Peeking at the server certificate (during step 2)
#		usually precludes bumping of the connection at step 3.
#
#	    stare
#		Receive client (step SslBump1) or server (step SslBump2)
#		certificate while preserving the possibility of bumping the
#		connection. Staring at the server certificate (during step 2)
#		usually precludes splicing of the connection at step 3.
#
#	    terminate
#		Close client and server connections.
#
#	Backward compatibility actions available at step SslBump1:
#
#	    client-first
#		Bump the connection. Establish a secure connection with the
#		client first, then connect to the server. This old mode does
#		not allow Squid to mimic server SSL certificate and does not
#		work with intercepted SSL connections.
#
#	    server-first
#		Bump the connection. Establish a secure connection with the
#		server first, then establish a secure connection with the
#		client, using a mimicked server certificate. Works with both
#		CONNECT requests and intercepted SSL connections, but does
#		not allow Squid to make decisions based on SSL handshake info.
#
#	    peek-and-splice
#		Decide whether to bump or splice the connection based on 
#		client-to-squid and server-to-squid SSL hello messages.
#		XXX: Remove.
#
#	    none
#		Same as the "splice" action.
#
#	All ssl_bump rules are evaluated at each of the supported bumping
#	steps.  Rules with actions that are impossible at the current step are
#	ignored. The first matching ssl_bump action wins and is applied at the
#	end of the current step. If no rules match, the splice action is used.
#	See the at_step ACL for a list of the supported SslBump steps.
#
#	This clause supports both fast and slow acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
#	See also: http_port ssl-bump, https_port ssl-bump, and acl at_step.
#
#
#	# Example: Bump all requests except those originating from
#	# localhost or those going to example.com.
#
#	acl broken_sites dstdomain .example.com
#	ssl_bump splice localhost
#	ssl_bump splice broken_sites
#	ssl_bump bump all
#Default:
# Become a TCP tunnel without decrypting proxied traffic.

#  TAG: sslproxy_flags
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Various flags modifying the use of SSL while proxying https:// URLs:
#	    DONT_VERIFY_PEER	Accept certificates that fail verification.
#				For refined control, see sslproxy_cert_error.
#	    NO_DEFAULT_CA	Don't use the default CA list built in
#				to OpenSSL.
#Default:
# none

#  TAG: sslproxy_cert_error
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Use this ACL to bypass server certificate validation errors.
#
#	For example, the following lines will bypass all validation errors
#	when talking to servers for example.com. All other
#	validation errors will result in ERR_SECURE_CONNECT_FAIL error.
#
#		acl BrokenButTrustedServers dstdomain example.com
#		sslproxy_cert_error allow BrokenButTrustedServers
#		sslproxy_cert_error deny all
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#	Using slow acl types may result in server crashes.
#
#	Without this option, all server certificate validation errors
#	terminate the transaction to protect Squid and the client.
#
#	SQUID_X509_V_ERR_INFINITE_VALIDATION error cannot be bypassed
#	but should not happen unless your OpenSSL library is buggy.
#
#	SECURITY WARNING:
#		Bypassing validation errors is dangerous because an
#		error usually implies that the server cannot be trusted
#		and the connection may be insecure.
#
#	See also: sslproxy_flags and DONT_VERIFY_PEER.
#Default:
# Server certificate errors terminate the transaction.

#  TAG: sslproxy_cert_sign
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	sslproxy_cert_sign <signing algorithm> acl ...
#
#	The following certificate signing algorithms are supported:
#
#	   signTrusted
#		Sign using the configured CA certificate which is usually
#		placed in and trusted by end-user browsers. This is the
#		default for trusted origin server certificates.
#
#	   signUntrusted
#		Sign to guarantee an X509_V_ERR_CERT_UNTRUSTED browser error.
#		This is the default for untrusted origin server certificates
#		that are not self-signed (see ssl::certUntrusted).
#
#	   signSelf
#		Sign using a self-signed certificate with the right CN to
#		generate an X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT error in the
#		browser. This is the default for self-signed origin server
#		certificates (see ssl::certSelfSigned).
#
#	This clause only supports fast acl types.
#
#	When sslproxy_cert_sign acl(s) match, Squid uses the corresponding
#	signing algorithm to generate the certificate and ignores all
#	subsequent sslproxy_cert_sign options (the first match wins). If no
#	acl(s) match, the default signing algorithm is determined by errors
#	detected when obtaining and validating the origin server certificate.
#
#	WARNING: SQUID_X509_V_ERR_DOMAIN_MISMATCH and ssl::certDomainMismatch can
#	be used with sslproxy_cert_sign, but if and only if Squid is bumping a
#	CONNECT request that carries a domain name. In all other cases (CONNECT
#	to an IP address or an intercepted SSL connection), Squid cannot detect
#	the domain mismatch at certificate generation time when
#	bump-server-first is used.
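#
#	An illustrative sketch (the ACL name and domain are hypothetical):
#
#		acl brokenButTrusted dstdomain .example.com
#		sslproxy_cert_sign signTrusted brokenButTrusted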
#Default:
# none

#  TAG: sslproxy_cert_adapt
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	sslproxy_cert_adapt <adaptation algorithm> acl ...
#
#	The following certificate adaptation algorithms are supported:
#
#	   setValidAfter
#		Sets the "Not After" property to the "Not After" property of
#		the CA certificate used to sign generated certificates.
#
#	   setValidBefore
#		Sets the "Not Before" property to the "Not Before" property of
#		the CA certificate used to sign generated certificates.
#
#	   setCommonName or setCommonName{CN}
#		Sets Subject.CN property to the host name specified as a 
#		CN parameter or, if no explicit CN parameter was specified,
#		extracted from the CONNECT request. It is a misconfiguration
#		to use setCommonName without an explicit parameter for
#		intercepted or tproxied SSL connections.
#		
#	This clause only supports fast acl types.
#
#	Squid first groups sslproxy_cert_adapt options by adaptation algorithm.
#	Within a group, when sslproxy_cert_adapt acl(s) match, Squid uses the
#	corresponding adaptation algorithm to generate the certificate and
#	ignores all subsequent sslproxy_cert_adapt options in that algorithm's
#	group (i.e., the first match wins within each algorithm group). If no
#	acl(s) match, the default mimicking action takes place.
#
#	WARNING: SQUID_X509_V_ERR_DOMAIN_MISMATCH and ssl::certDomainMismatch can
#	be used with sslproxy_cert_adapt, but if and only if Squid is bumping a
#	CONNECT request that carries a domain name. In all other cases (CONNECT
#	to an IP address or an intercepted SSL connection), Squid cannot detect
#	the domain mismatch at certificate generation time when
#	bump-server-first is used.
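#
#	An illustrative sketch using the built-in "all" ACL:
#
#		sslproxy_cert_adapt setValidAfter all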
#Default:
# none

#  TAG: sslpassword_program
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Specify a program used for entering SSL key passphrases
#	when using encrypted SSL certificate keys. If not specified,
#	keys must either be unencrypted, or Squid must be started with
#	the -N option to allow it to query interactively for the
#	passphrase.
#
#	The key file name is given as an argument to the program, allowing
#	selection of the right password if you have multiple encrypted
#	keys.
#Default:
# none

# OPTIONS RELATING TO EXTERNAL SSL_CRTD 
# -----------------------------------------------------------------------------

#  TAG: sslcrtd_program
# Note: This option is only available if Squid is rebuilt with the
#       --enable-ssl-crtd
#
#	Specify the location and options of the executable for the ssl_crtd process.
#	The /usr/lib/squid/ssl_crtd program requires the -s and -M parameters.
#	For more information use:
#		/usr/lib/squid/ssl_crtd -h
#Default:
# sslcrtd_program /usr/lib/squid/ssl_crtd -s /var/lib/ssl_db -M 4MB

#  TAG: sslcrtd_children
# Note: This option is only available if Squid is rebuilt with the
#       --enable-ssl-crtd
#
#	The maximum number of processes spawned to service SSL certificate generation.
#	The maximum this may be safely set to is 32.
#	
#	The startup= and idle= options allow some measure of skew in your
#	tuning.
#	
#		startup=N
#	
#	Sets the minimum number of processes to spawn when Squid
#	starts or reconfigures. When set to zero the first request will
#	cause spawning of the first child process to handle it.
#	
#	Starting too few children temporarily slows Squid under load while it
#	tries to spawn enough additional processes to cope with traffic.
#	
#		idle=N
#	
#	Sets the minimum number of processes Squid should try to keep available
#	at all times. When traffic begins to rise above what the existing
#	processes can handle, this many more will be spawned up to the maximum
#	configured. A minimum setting of 1 is required.
#	
#	You must have at least one ssl_crtd process.
#Default:
# sslcrtd_children 32 startup=5 idle=1

#  TAG: sslcrtvalidator_program
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	Specify the location and options of the executable for ssl_crt_validator
#	process.
#
#	Usage:  sslcrtvalidator_program [ttl=n] [cache=n] path ...
#
#	Options:
#	  ttl=n         TTL in seconds for cached results. The default is 60 secs
#	  cache=n       limit the result cache size. The default value is 2048
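#
#	For example (the helper path, TTL and cache size are illustrative):
#
#		sslcrtvalidator_program ttl=120 cache=4096 /usr/lib/squid/ssl_crt_validator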
#Default:
# none

#  TAG: sslcrtvalidator_children
# Note: This option is only available if Squid is rebuilt with the
#       --with-openssl
#
#	The maximum number of processes spawned to service SSL certificate validation.
#	The maximum this may be safely set to is 32.
#	
#	The startup= and idle= options allow some measure of skew in your
#	tuning.
#	
#		startup=N
#	
#	Sets the minimum number of processes to spawn when Squid
#	starts or reconfigures. When set to zero the first request will
#	cause spawning of the first child process to handle it.
#	
#	Starting too few children temporarily slows Squid under load while it
#	tries to spawn enough additional processes to cope with traffic.
#	
#		idle=N
#	
#	Sets the minimum number of processes Squid should try to keep available
#	at all times. When traffic begins to rise above what the existing
#	processes can handle, this many more will be spawned up to the maximum
#	configured. A minimum setting of 1 is required.
#
#		concurrency=
#	
#	The number of requests each certificate validator helper can handle in
#	parallel. A value of 0 indicates the certificate validator does not
#	support concurrency. Defaults to 1.
#	
#	When this directive is set to a value >= 1 then the protocol
#	used to communicate with the helper is modified to include
#	a request ID in front of the request/response. The request
#	ID from the request must be echoed back with the response
#	to that request.
#	
#	You must have at least one ssl_crt_validator process.
#Default:
# sslcrtvalidator_children 32 startup=5 idle=1 concurrency=1

# OPTIONS WHICH AFFECT THE NEIGHBOR SELECTION ALGORITHM
# -----------------------------------------------------------------------------

#  TAG: cache_peer
#	To specify other caches in a hierarchy, use the format:
#	
#		cache_peer hostname type http-port icp-port [options]
#	
#	For example,
#	
#	#                                        proxy  icp
#	#          hostname             type     port   port  options
#	#          -------------------- -------- ----- -----  -----------
#	cache_peer parent.foo.net       parent    3128  3130  default
#	cache_peer sib1.foo.net         sibling   3128  3130  proxy-only
#	cache_peer sib2.foo.net         sibling   3128  3130  proxy-only
#	cache_peer example.com          parent    80       0  default
#	cache_peer cdn.example.com      sibling   3128     0  
#	
#	      type:	either 'parent', 'sibling', or 'multicast'.
#	
#	proxy-port:	The port number where the peer accepts HTTP requests.
#			For other Squid proxies this is usually 3128
#			For web servers this is usually 80
#	
#	  icp-port:	Used for querying neighbor caches about objects.
#			Set to 0 if the peer does not support ICP or HTCP.
#			See ICP and HTCP options below for additional details.
#	
#	
#	==== ICP OPTIONS ====
#	
#	You MUST also set icp_port and icp_access explicitly when using these options.
#	The defaults will prevent peer traffic using ICP.
#	
#	
#	no-query	Disable ICP queries to this neighbor.
#	
#	multicast-responder
#			Indicates the named peer is a member of a multicast group.
#			ICP queries will not be sent directly to the peer, but ICP
#			replies will be accepted from it.
#	
#	closest-only	Indicates that, for ICP_OP_MISS replies, we'll only forward
#			CLOSEST_PARENT_MISSes and never FIRST_PARENT_MISSes.
#	
#	background-ping
#			To only send ICP queries to this neighbor infrequently.
#			This is used to keep the neighbor round trip time updated
#			and is usually used in conjunction with weighted-round-robin.
#	
#	
#	==== HTCP OPTIONS ====
#	
#	You MUST also set htcp_port and htcp_access explicitly when using these options.
#	The defaults will prevent peer traffic using HTCP.
#	
#	
#	htcp		Send HTCP, instead of ICP, queries to the neighbor.
#			You probably also want to set the "icp-port" to 4827
#			instead of 3130. This directive accepts a comma separated
#			list of options described below.
#	
#	htcp=oldsquid	Send HTCP to old Squid versions (2.5 or earlier).
#	
#	htcp=no-clr	Send HTCP to the neighbor but without
#			sending any CLR requests.  This cannot be used with
#			only-clr.
#	
#	htcp=only-clr	Send HTCP to the neighbor but ONLY CLR requests.
#			This cannot be used with no-clr.
#	
#	htcp=no-purge-clr
#			Send HTCP to the neighbor including CLRs but only when
#			they do not result from PURGE requests.
#	
#	htcp=forward-clr
#			Forward any HTCP CLR requests this proxy receives to the peer.
#	
#	
#	==== PEER SELECTION METHODS ====
#	
#	The default peer selection method is ICP, with the first responding peer
#	being used as source. These options can be used for better load balancing.
#	
#	
#	default		This is a parent cache which can be used as a "last-resort"
#			if a peer cannot be located by any of the peer-selection methods.
#			If specified more than once, only the first is used.
#	
#	round-robin	Load-Balance parents which should be used in a round-robin
#			fashion in the absence of any ICP queries.
#			weight=N can be used to add bias.
#	
#	weighted-round-robin
#			Load-Balance parents which should be used in a round-robin
#			fashion with the frequency of each parent being based on the
#			round trip time. Closer parents are used more often.
#			Usually used for background-ping parents.
#			weight=N can be used to add bias.
#	
#	carp		Load-Balance parents which should be used as a CARP array.
#			Requests will be distributed among the parents by the
#			CARP load balancing hash function, based on their weight.
#	
#	userhash	Load-balance parents based on the client proxy_auth or ident username.
#	
#	sourcehash	Load-balance parents based on the client source IP.
#
#	multicast-siblings
#			To be used only for cache peers of type "multicast".
#			ALL members of this multicast group have a "sibling"
#			relationship with it, not "parent". Use this when the
#			requested object would otherwise be fetched only from
#			a "parent" cache. It is useful, e.g., when configuring
#			a pool of redundant Squid proxies that are members of
#			the same multicast group.
#	
#	
#	==== PEER SELECTION OPTIONS ====
#	
#	weight=N	use to affect the selection of a peer during any weighted
#			peer-selection mechanisms.
#			The weight must be an integer; default is 1,
#			larger weights are favored more.
#			This option does not affect parent selection if a peering
#			protocol is not in use.
#	
#	basetime=N	Specify a base amount to be subtracted from round trip
#			times of parents.
#			It is subtracted before division by weight in calculating
#			which parent to fetch from. If the rtt is less than the
#			base time the rtt is set to a minimal value.
#	
#	ttl=N		Specify a TTL to use when sending multicast ICP queries
#			to this address.
#			Only useful when sending to a multicast group.
#			Because we don't accept ICP replies from random
#			hosts, you must configure other group members as
#			peers with the 'multicast-responder' option.
#	
#	no-delay	To prevent access to this neighbor from influencing the
#			delay pools.
#	
#	digest-url=URL	Tell Squid to fetch the cache digest (if digests are
#			enabled) for this host from the specified URL rather
#			than the Squid default location.
#	
#	
#	==== CARP OPTIONS ====
#	
#	carp-key=key-specification
#			Use a different key than the full URL to hash against the peer.
#			The key-specification is a comma-separated list of the keywords
#			scheme, host, port, path, params.
#			Order is not important.
#	
#	==== ACCELERATOR / REVERSE-PROXY OPTIONS ====
#	
#	originserver	Causes this parent to be contacted as an origin server.
#			Meant to be used in accelerator setups when the peer
#			is a web server.
#	
#	forceddomain=name
#			Set the Host header of requests forwarded to this peer.
#			Useful in accelerator setups where the server (peer)
#			expects a certain domain name but clients may request
#			others, e.g. example.com or www.example.com.
#	
#	no-digest	Disable request of cache digests.
#	
#	no-netdb-exchange
#			Disables requesting ICMP RTT database (NetDB).
#	
#	
#	==== AUTHENTICATION OPTIONS ====
#	
#	login=user:password
#			If this is a personal/workgroup proxy and your parent
#			requires proxy authentication.
#			
#			Note: The string can include URL escapes (i.e. %20 for
#			spaces). This also means % must be written as %%.
#	
#	login=PASSTHRU
#			Send login details received from client to this peer.
#			Both Proxy- and WWW-Authorization headers are passed
#			without alteration to the peer.
#			Authentication is not required by Squid for this to work.
#			
#			Note: This will pass any form of authentication but
#			only Basic auth will work through a proxy unless the
#			connection-auth options are also used.
#
#	login=PASS	Send login details received from client to this peer.
#			Authentication is not required by this option.
#			
#			If there are no client-provided authentication headers
#			to pass on, but username and password are available
#			from an external ACL user= and password= result tags
#			they may be sent instead.
#			
#			Note: To combine this with proxy_auth both proxies must
#			share the same user database as HTTP only allows for
#			a single login (one for proxy, one for origin server).
#			Also be warned this will expose your users' proxy
#			passwords to the peer. USE WITH CAUTION
#	
#	login=*:password
#			Send the username to the upstream cache, but with a
#			fixed password. This is meant to be used when the peer
#			is in another administrative domain, but it is still
#			needed to identify each user.
#			The star can optionally be followed by some extra
#			information which is added to the username. This can
#			be used to identify this proxy to the peer, similar to
#			the login=username:password option above.
#	
#	login=NEGOTIATE
#			If this is a personal/workgroup proxy and your parent
#			requires a secure proxy authentication.
#			The first principal from the default keytab or defined by
#			the environment variable KRB5_KTNAME will be used. 
#	
#			WARNING: The connection may transmit requests from multiple
#			clients. Negotiate often assumes end-to-end authentication
#			and a single client, which is not strictly true here.
#	
#	login=NEGOTIATE:principal_name
#			If this is a personal/workgroup proxy and your parent
#			requires a secure proxy authentication. 
#			The principal principal_name from the default keytab or
#			defined by the environment variable KRB5_KTNAME will be
#			used.
#	
#			WARNING: The connection may transmit requests from multiple
#			clients. Negotiate often assumes end-to-end authentication
#			and a single client, which is not strictly true here.
#	
#	connection-auth=on|off
#			Tell Squid that this peer does or does not support Microsoft
#			connection oriented authentication, and any such
#			challenges received from there should be ignored.
#			The default is auto, which automatically determines the
#			status of the peer.
#	
#	
#	==== SSL / HTTPS / TLS OPTIONS ====
#	
#	ssl		Encrypt connections to this peer with SSL/TLS.
#	
#	sslcert=/path/to/ssl/certificate
#			A client SSL certificate to use when connecting to
#			this peer.
#	
#	sslkey=/path/to/ssl/key
#			The private SSL key corresponding to sslcert above.
#			If 'sslkey' is not specified 'sslcert' is assumed to
#			reference a combined file containing both the
#			certificate and the key.
#
#	Notes:
#	
#	On Debian/Ubuntu systems a default snakeoil certificate is
#	available in /etc/ssl and users can set:
#
#		sslcert=/etc/ssl/certs/ssl-cert-snakeoil.pem
#
#	and
#
#		sslkey=/etc/ssl/private/ssl-cert-snakeoil.key
#
#	for testing.
#	
#	sslversion=1|2|3|4|5|6
#			The SSL version to use when connecting to this peer
#				1 = automatic (default)
#				2 = SSL v2 only
#				3 = SSL v3 only
#				4 = TLS v1.0 only
#				5 = TLS v1.1 only
#				6 = TLS v1.2 only
#	
#	sslcipher=...	The list of valid SSL ciphers to use when connecting
#			to this peer.
#	
#	ssloptions=... 	Specify various SSL implementation options:
#
#			    NO_SSLv2    Disallow the use of SSLv2
#			    NO_SSLv3    Disallow the use of SSLv3
#			    NO_TLSv1    Disallow the use of TLSv1.0
#			    NO_TLSv1_1  Disallow the use of TLSv1.1
#			    NO_TLSv1_2  Disallow the use of TLSv1.2
#			    SINGLE_DH_USE
#				      Always create a new key when using
#				      temporary/ephemeral DH key exchanges
#			    ALL       Enable various bug workarounds
#				      suggested as "harmless" by OpenSSL
#				      Be warned that this reduces SSL/TLS
#				      strength to some attacks.
#
#			See the OpenSSL SSL_CTX_set_options documentation for a
#			more complete list.
#	
#	sslcafile=... 	A file containing additional CA certificates to use
#			when verifying the peer certificate.
#	
#	sslcapath=...	A directory containing additional CA certificates to
#			use when verifying the peer certificate.
#	
#	sslcrlfile=... 	A certificate revocation list file to use when
#			verifying the peer certificate.
#	
#	sslflags=...	Specify various flags modifying the SSL implementation:
#	
#			DONT_VERIFY_PEER
#				Accept certificates even if they fail to
#				verify.
#			NO_DEFAULT_CA
#				Don't use the default CA list built in
#				to OpenSSL.
#			DONT_VERIFY_DOMAIN
#				Don't verify the peer certificate
#				matches the server name
#	
#	ssldomain= 	The peer name as advertised in its certificate.
#			Used for verifying the correctness of the received peer
#			certificate. If not specified the peer hostname will be
#			used.
#	
#	front-end-https
#			Enable the "Front-End-Https: On" header needed when
#			using Squid as an SSL frontend in front of Microsoft OWA.
#			See MS KB document Q307347 for details on this header.
#			If set to auto the header will only be added if the
#			request is forwarded as a https:// URL.
#	
#	
#	==== GENERAL OPTIONS ====
#	
#	connect-timeout=N
#			A peer-specific connect timeout.
#			Also see the peer_connect_timeout directive.
#	
#	connect-fail-limit=N
#			How many times connecting to a peer must fail before
#			it is marked as down. Standby connection failures
#			count towards this limit. Default is 10.
#	
#	allow-miss	Disable Squid's use of only-if-cached when forwarding
#			requests to siblings. This is primarily useful when
#			icp_hit_stale is used by the sibling. Excessive use
#			of this option may result in forwarding loops. One way
#			to prevent peering loops when using this option is to
#			deny cache peer usage on requests from a peer:
#			acl fromPeer ...
#			cache_peer_access peerName deny fromPeer
#	
#	max-conn=N 	Limit the number of concurrent connections Squid
#			may open to this peer, including already opened idle
#			and standby connections. There is no peer-specific
#			connection limit by default.
#	
#			A peer exceeding the limit is not used for new
#			requests unless a standby connection is available.
#	
#			max-conn currently works poorly with idle persistent
#			connections: When a peer reaches its max-conn limit,
#			and there are idle persistent connections to the peer,
#			the peer may not be selected because the limiting code
#			does not know whether Squid can reuse those idle
#			connections.
#	
#	standby=N	Maintain a pool of N "hot standby" connections to an
#			UP peer, available for requests when no idle
#			persistent connection is available (or safe) to use.
#			By default and with zero N, no such pool is maintained.
#			N must not exceed the max-conn limit (if any).
#	
#			At start or after reconfiguration, Squid opens new TCP
#			standby connections until there are N connections
#			available and then replenishes the standby pool as
#			opened connections are used up for requests. A used
#			connection never goes back to the standby pool, but
#			may go to the regular idle persistent connection pool
#			shared by all peers and origin servers.
#	
#			Squid never opens multiple new standby connections
#			concurrently.  This one-at-a-time approach minimizes
#			flooding-like effect on peers. Furthermore, just a few
#			standby connections should be sufficient in most cases
#			to supply most new requests with a ready-to-use
#			connection.
#	
#			Standby connections obey server_idle_pconn_timeout.
#			For the feature to work as intended, the peer must be
#			configured to accept and keep them open longer than
#			the idle timeout at the connecting Squid, to minimize
#			race conditions typical to idle used persistent
#			connections. Default request_timeout and
#			server_idle_pconn_timeout values ensure such a
#			configuration.
#	
#	name=xxx	Unique name for the peer.
#			Required if you have multiple peers on the same host
#			but different ports.
#			This name can be used in cache_peer_access and similar
#			directives to identify the peer.
#			Can be used by outgoing access controls through the
#			peername ACL type.
#	
#	no-tproxy	Do not use the client-spoof TPROXY support when forwarding
#			requests to this peer. Use normal address selection instead.
#			This overrides the spoof_client_ip ACL.
#	
#	proxy-only	objects fetched from the peer will not be stored locally.
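#	
#	An illustrative TLS parent sketch (the hostname and CA file path
#	are hypothetical):
#	
#		cache_peer secure.example.net parent 443 0 ssl sslcafile=/etc/squid/peer-ca.pem name=securePeer no-query default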
#	
#Default:
# none

#  TAG: cache_peer_domain
#	Use to limit the domains for which a neighbor cache will be
#	queried.
#
#	Usage:
#		cache_peer_domain cache-host domain [domain ...]
#		cache_peer_domain cache-host !domain
#
#	For example, specifying
#
#		cache_peer_domain parent.foo.net	.edu
#
#	has the effect such that UDP query packets are sent to
#	'parent.foo.net' only when the requested object exists on a
#	server in the .edu domain.  Prefixing the domain name
#	with '!' means the cache will be queried for objects
#	NOT in that domain.
#
#	NOTE:	* Any number of domains may be given for a cache-host,
#		  either on the same or separate lines.
#		* When multiple domains are given for a particular
#		  cache-host, the first matched domain is applied.
#		* Cache hosts with no domain restrictions are queried
#		  for all requests.
#		* There are no defaults.
#		* There is also a 'cache_peer_access' tag in the ACL
#		  section.
#Default:
# none

#  TAG: cache_peer_access
#	Restricts usage of cache_peer proxies.
#
#	Usage:
#		cache_peer_access peer-name allow|deny [!]aclname ...
#
#	For the required peer-name parameter, use either the value of the
#	cache_peer name=value parameter or, if name=value is missing, the
#	cache_peer hostname parameter.
#
#	This directive narrows down the selection of peering candidates, but
#	does not determine the order in which the selected candidates are
#	contacted. That order is determined by the peer selection algorithms
#	(see PEER SELECTION sections in the cache_peer documentation).
#
#	If a deny rule matches, the corresponding peer will not be contacted
#	for the current transaction -- Squid will not send ICP queries and
#	will not forward HTTP requests to that peer. An allow match leaves
#	the corresponding peer in the selection. The first match for a given
#	peer wins for that peer.
#
#	The relative order of cache_peer_access directives for the same peer
#	matters. The relative order of any two cache_peer_access directives
#	for different peers does not matter. To ease interpretation, it is a
#	good idea to group cache_peer_access directives for the same peer
#	together.
#
#	A single cache_peer_access directive may be evaluated multiple times
#	for a given transaction because individual peer selection algorithms
#	may check it independently from each other. These redundant checks
#	may be optimized away in future Squid versions.
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
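#
#	An illustrative sketch (the ACL name is hypothetical; the peer name
#	matches the cache_peer example above):
#
#		acl internalNet src 10.0.0.0/8
#		cache_peer_access parent.foo.net allow internalNet
#		cache_peer_access parent.foo.net deny all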
#Default:
# No peer usage restrictions.

#  TAG: neighbor_type_domain
#	Modify the cache_peer neighbor type when passing requests
#	about specific domains to the peer.
#
#	Usage:
#		 neighbor_type_domain neighbor parent|sibling domain domain ...
#
#	For example:
#		cache_peer foo.example.com parent 3128 3130
#		neighbor_type_domain foo.example.com sibling .au .de
#
#	The above configuration treats foo.example.com as a parent proxy for
#	all requests except those for .au or .de ccTLD domain names, for
#	which it is treated as a sibling.
#Default:
# The peer type from cache_peer directive is used for all requests to that peer.

#  TAG: dead_peer_timeout	(seconds)
#	This controls how long Squid waits to declare a peer cache
#	as "dead."  If there are no ICP replies received in this
#	amount of time, Squid will declare the peer dead and not
#	expect to receive any further ICP replies.  However, it
#	continues to send ICP queries, and will mark the peer as
#	alive upon receipt of the first subsequent ICP reply.
#
#	This timeout also affects when Squid expects to receive ICP
#	replies from peers.  If more than 'dead_peer' seconds have
#	passed since the last ICP reply was received, Squid will not
#	expect to receive an ICP reply on the next query.  Thus, if
#	your time between requests is greater than this timeout, you
#	will see a lot of requests sent DIRECT to origin servers
#	instead of to your parents.
#Default:
# dead_peer_timeout 10 seconds

#  TAG: forward_max_tries
#	Controls how many different forward paths Squid will try
#	before giving up. See also forward_timeout.
#	
#	NOTE: connect_retries (default: none) can make each of these
#	possible forwarding paths be tried multiple times.
#Default:
# forward_max_tries 25

# MEMORY CACHE OPTIONS
# -----------------------------------------------------------------------------

#  TAG: cache_mem	(bytes)
#	NOTE: THIS PARAMETER DOES NOT SPECIFY THE MAXIMUM PROCESS SIZE.
#	IT ONLY PLACES A LIMIT ON HOW MUCH ADDITIONAL MEMORY SQUID WILL
#	USE AS A MEMORY CACHE OF OBJECTS. SQUID USES MEMORY FOR OTHER
#	THINGS AS WELL. SEE THE SQUID FAQ SECTION 8 FOR DETAILS.
#
#	'cache_mem' specifies the ideal amount of memory to be used
#	for:
#		* In-Transit objects
#		* Hot Objects
#		* Negative-Cached objects
#
#	Data for these objects are stored in 4 KB blocks.  This
#	parameter specifies the ideal upper limit on the total size of
#	4 KB blocks allocated.  In-Transit objects take the highest
#	priority.
#
#	In-transit objects have priority over the others.  When
#	additional space is needed for incoming data, negative-cached
#	and hot objects will be released.  In other words, the
#	negative-cached and hot objects will fill up any unused space
#	not needed for in-transit objects.
#
#	If circumstances require, this limit will be exceeded.
#	Specifically, if your incoming request rate requires more than
#	'cache_mem' of memory to hold in-transit objects, Squid will
#	exceed this limit to satisfy the new requests.  When the load
#	decreases, blocks will be freed until the high-water mark is
#	reached.  Thereafter, blocks will be used to store hot
#	objects.
#
#	If shared memory caching is enabled, Squid does not use the shared
#	cache space for in-transit objects, but they still consume as much
#	local memory as they need. For more details about the shared memory
#	cache, see memory_cache_shared.
#Default:
# cache_mem 256 MB

#  TAG: maximum_object_size_in_memory	(bytes)
#	Objects greater than this size will not be kept in
#	the memory cache. This should be set high enough to keep objects
#	accessed frequently in memory to improve performance whilst low
#	enough to keep larger objects from hoarding cache_mem.
#Default:
# maximum_object_size_in_memory 512 KB

#  TAG: memory_cache_shared	on|off
#	Controls whether the memory cache is shared among SMP workers.
#
#	The shared memory cache is meant to occupy cache_mem bytes and replace
#	the non-shared memory cache, although some entities may still be
#	cached locally by workers for now (e.g., internal and in-transit
#	objects may be served from a local memory cache even if shared memory
#	caching is enabled).
#
#	By default, the memory cache is shared if and only if all of the
#	following conditions are satisfied: Squid runs in SMP mode with
#	multiple workers, cache_mem is positive, and Squid environment
#	supports required IPC primitives (e.g., POSIX shared memory segments
#	and GCC-style atomic operations).
#
#	To avoid blocking locks, shared memory uses opportunistic algorithms
#	that do not guarantee that every cachable entity that could have been
#	shared among SMP workers will actually be shared.
#
#	Currently, entities exceeding 32KB in size cannot be shared.
#Default:
# "on" where supported if doing memory caching with multiple SMP workers.

#  TAG: memory_cache_mode
#	Controls which objects to keep in the memory cache (cache_mem)
#
#	always	Keep most recently fetched objects in memory (default)
#
#	disk	Only disk cache hits are kept in memory, which means
#		an object must first be cached on disk and then hit
#		a second time before being cached in memory.
#
#	network	Only objects fetched from the network are kept in memory
#Default:
# Keep the most recently fetched objects in memory

#  TAG: memory_replacement_policy
#	The memory replacement policy parameter determines which
#	objects are purged from memory when memory space is needed.
#
#	See cache_replacement_policy for details on algorithms.
#Default:
# memory_replacement_policy lru

# DISK CACHE OPTIONS
# -----------------------------------------------------------------------------

#  TAG: cache_replacement_policy
#	The cache replacement policy parameter determines which
#	objects are evicted (replaced) when disk space is needed.
#
#	    lru       : Squid's original list based LRU policy
#	    heap GDSF : Greedy-Dual Size Frequency
#	    heap LFUDA: Least Frequently Used with Dynamic Aging
#	    heap LRU  : LRU policy implemented using a heap
#
#	Applies to any cache_dir lines listed below this directive.
#
#	The LRU policies keep recently referenced objects.
#
#	The heap GDSF policy optimizes object hit rate by keeping smaller
#	popular objects in cache so it has a better chance of getting a
#	hit.  It achieves a lower byte hit rate than LFUDA though since
#	it evicts larger (possibly popular) objects.
#
#	The heap LFUDA policy keeps popular objects in cache regardless of
#	their size and thus optimizes byte hit rate at the expense of
#	hit rate since one large, popular object will prevent many
#	smaller, slightly less popular objects from being cached.
#
#	Both policies utilize a dynamic aging mechanism that prevents
#	cache pollution that can otherwise occur with frequency-based
#	replacement policies.
#
#	NOTE: if using the LFUDA replacement policy you should increase
#	the value of maximum_object_size above its default of 4 MB
#	to maximize the potential byte hit rate improvement of LFUDA.
#
#	For more information about the GDSF and LFUDA cache replacement
#	policies see http://www.hpl.hp.com/techreports/1999/HPL-1999-69.html
#	and http://fog.hpl.external.hp.com/techreports/98/HPL-98-173.html.
#Default:
# cache_replacement_policy lru
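
# Example (sketch): favour object hit ratio using the GDSF policy.
#cache_replacement_policy heap GDSF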

#  TAG: minimum_object_size	(bytes)
#	Objects smaller than this size will NOT be saved on disk.  The
#	value is specified in bytes, and the default is 0 KB, which
#	means all responses can be stored.
#Default:
# no limit

#  TAG: maximum_object_size	(bytes)
#	Set the default value for max-size parameter on any cache_dir.
#	The value is specified in bytes, and the default is 4 MB.
#	
#	If you wish to get a high BYTES hit ratio, you should probably
#	increase this (one 32 MB object hit counts for 3200 10KB
#	hits).
#	
#	If you wish to increase the hit ratio more than you want to
#	save bandwidth, you should leave this low.
#	
#	NOTE: if using the LFUDA replacement policy you should increase
#	this value to maximize the byte hit rate improvement of LFUDA!
#	See cache_replacement_policy for a discussion of this policy.
#Default:
# maximum_object_size 4 MB
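
# Example (sketch; 512 MB is a placeholder): raise the per-object cap,
# e.g. when chasing byte hit ratio with heap LFUDA.
#maximum_object_size 512 MB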

#  TAG: cache_dir
#	Format:
#		cache_dir Type Directory-Name Fs-specific-data [options]
#
#	You can specify multiple cache_dir lines to spread the
#	cache among different disk partitions.
#
#	Type specifies the kind of storage system to use. Only "ufs"
#	is built by default. To enable any of the other storage systems
#	see the --enable-storeio configure option.
#
#	'Directory' is a top-level directory where cache swap
#	files will be stored.  If you want to use an entire disk
#	for caching, this can be the mount-point directory.
#	The directory must exist and be writable by the Squid
#	process.  Squid will NOT create this directory for you.
#
#	In SMP configurations, cache_dir must not precede the workers option
#	and should use configuration macros or conditionals to give each
#	worker interested in disk caching a dedicated cache directory.
#
#
#	====  The ufs store type  ====
#
#	"ufs" is the old well-known Squid storage format that has always
#	been there.
#
#	Usage:
#		cache_dir ufs Directory-Name Mbytes L1 L2 [options]
#
#	'Mbytes' is the amount of disk space (MB) to use under this
#	directory.  The default is 100 MB.  Change this to suit your
#	configuration.  Do NOT put the size of your disk drive here.
#	Instead, if you want Squid to use the entire disk drive,
#	subtract 20% and use that value.
#
#	'L1' is the number of first-level subdirectories which
#	will be created under the 'Directory'.  The default is 16.
#
#	'L2' is the number of second-level subdirectories which
#	will be created under each first-level directory.  The default
#	is 256.
#
#
#	====  The aufs store type  ====
#
#	"aufs" uses the same storage format as "ufs", utilizing
#	POSIX-threads to avoid blocking the main Squid process on
#	disk-I/O. This was formerly known in Squid as async-io.
#
#	Usage:
#		cache_dir aufs Directory-Name Mbytes L1 L2 [options]
#
#	see argument descriptions under ufs above
#
#
#	====  The diskd store type  ====
#
#	"diskd" uses the same storage format as "ufs", utilizing a
#	separate process to avoid blocking the main Squid process on
#	disk-I/O.
#
#	Usage:
#		cache_dir diskd Directory-Name Mbytes L1 L2 [options] [Q1=n] [Q2=n]
#
#	see argument descriptions under ufs above
#
#	Q1 specifies the number of unacknowledged I/O requests when Squid
#	stops opening new files. If this many messages are in the queues,
#	Squid won't open new files. Default is 64
#
#	Q2 specifies the number of unacknowledged messages when Squid
#	starts blocking.  If this many messages are in the queues,
#	Squid blocks until it receives some replies. Default is 72
#
#	When Q1 < Q2 (the default), the cache directory is optimized
#	for lower response time at the expense of a decrease in hit
#	ratio.  If Q1 > Q2, the cache directory is optimized for
#	higher hit ratio at the expense of an increase in response
#	time.
#
#
#	====  The rock store type  ====
#
#	Usage:
#	    cache_dir rock Directory-Name Mbytes [options]
#
#	The Rock Store type is a database-style storage. All cached
#	entries are stored in a "database" file, using fixed-size slots.
#	A single entry occupies one or more slots.
#
#	If possible, Squid using Rock Store creates a dedicated kid
#	process called "disker" to avoid blocking Squid worker(s) on disk
#	I/O. One disker kid is created for each rock cache_dir.  Diskers
#	are created only when Squid, running in daemon mode, has support
#	for the IpcIo disk I/O module.
#
#	swap-timeout=msec: Squid will not start writing a miss to or
#	reading a hit from disk if it estimates that the swap operation
#	will take more than the specified number of milliseconds. By
#	default, and when set to zero, this time limit is not
#	enforced. Ignored when using the blocking I/O module because
#	blocking synchronous I/O does not allow Squid to estimate the
#	expected swap wait time.
#
#	max-swap-rate=swaps/sec: Artificially limits disk access using
#	the specified I/O rate limit. Swap out requests that
#	would cause the average I/O rate to exceed the limit are
#	delayed. Individual swap in requests (i.e., hits or reads) are
#	not delayed, but they do contribute to measured swap rate and
#	since they are placed in the same FIFO queue as swap out
#	requests, they may wait longer if max-swap-rate is smaller.
#	This is necessary on file systems that buffer "too
#	many" writes and then start blocking Squid and other processes
#	while committing those writes to disk.  Usually used together
#	with swap-timeout to avoid excessive delays and queue overflows
#	when disk demand exceeds available disk "bandwidth". By default,
#	and when set to zero, the disk I/O rate limit is not
#	enforced. Currently supported by the IpcIo module only.
#
#	slot-size=bytes: The size of a database "record" used for
#	storing cached responses. A cached response occupies at least
#	one slot and all database I/O is done using individual slots so
#	increasing this parameter leads to more disk space waste while
#	decreasing it leads to more disk I/O overheads. Should be a
#	multiple of your operating system I/O page size. Defaults to
#	16KBytes. A housekeeping header is stored with each slot and
#	smaller slot-sizes will be rejected. The header is smaller than
#	100 bytes.
#
#
#	==== COMMON OPTIONS ====
#
#	no-store	no new objects should be stored to this cache_dir.
#
#	min-size=n	the minimum object size in bytes this cache_dir
#			will accept.  It's used to restrict a cache_dir
#			to only store large objects (e.g. AUFS) while
#			other stores are optimized for smaller objects
#			(e.g. Rock).
#			Defaults to 0.
#
#	max-size=n	the maximum object size in bytes this cache_dir
#			supports.
#			The value in maximum_object_size directive sets
#			the default unless more specific details are
#			available (ie a small store capacity).
#
#	Note: To make optimal use of the max-size limits you should order
#	the cache_dir lines with the smallest max-size value first.
#
#Default:
# No disk cache. Store cache objects only in memory.
#

# Uncomment and adjust the following to add a disk cache directory.
#cache_dir ufs /var/spool/squid 100 16 256
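
# A rock store sketch (path, size and options are placeholders; see the
# rock options documented above):
#cache_dir rock /var/spool/squid-rock 2048 max-size=32768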

#  TAG: store_dir_select_algorithm
#	How Squid selects which cache_dir to use when the response
#	object will fit into more than one.
#
#	Regardless of which algorithm is used the cache_dir min-size
#	and max-size parameters are obeyed. As such they can affect
#	the selection algorithm by limiting the set of considered
#	cache_dir.
#
#	Algorithms:
#
#		least-load
#
#	This algorithm is suited to caches with similar cache_dir
#	sizes and disk speeds.
#
#	The disk with the least I/O pending is selected.
#	When there are multiple disks with the same I/O load ranking
#	the cache_dir with most available capacity is selected.
#
#	When a mix of cache_dir sizes are configured the faster disks
#	have a naturally lower I/O loading and larger disks have more
#	capacity. So space used to store objects and data throughput
#	may be very unbalanced towards larger disks.
#
#
#		round-robin
#
#	This algorithm is suited to caches with unequal cache_dir
#	disk sizes.
#
#	Each cache_dir is selected in a rotation. The next suitable
#	cache_dir is used.
#
#	Available cache_dir capacity is only considered in relation
#	to whether the object will fit and meets the min-size and
#	max-size parameters.
#
#	Disk I/O loading is only considered to prevent overload on slow
#	disks. This algorithm does not spread objects by size, so any
#	I/O loading per-disk may appear very unbalanced and volatile.
#
#	If several cache_dirs use similar min-size, max-size, or other
#	limits to reject certain responses, then do not group such
#	cache_dir lines together, to avoid round-robin selection bias
#	towards the first cache_dir after the group. Instead, interleave
#	cache_dir lines from different groups. For example:
#
#		store_dir_select_algorithm round-robin
#		cache_dir rock /hdd1 ... min-size=100000
#		cache_dir rock /ssd1 ... max-size=99999
#		cache_dir rock /hdd2 ... min-size=100000
#		cache_dir rock /ssd2 ... max-size=99999
#		cache_dir rock /hdd3 ... min-size=100000
#		cache_dir rock /ssd3 ... max-size=99999
#Default:
# store_dir_select_algorithm least-load

#  TAG: max_open_disk_fds
#	To avoid having disk as the I/O bottleneck Squid can optionally
#	bypass the on-disk cache if more than this number of disk file
#	descriptors are open.
#
#	A value of 0 indicates no limit.
#Default:
# no limit

#  TAG: cache_swap_low	(percent, 0-100)
#	The low-water mark for AUFS/UFS/diskd cache object eviction by
#	the cache_replacement_policy algorithm.
#
#	Removal begins when the swap (disk) usage of a cache_dir is
#	above this low-water mark and attempts to maintain utilization
#	near the low-water mark.
#
#	As swap utilization increases towards the high-water mark set
#	by cache_swap_high, object eviction becomes more aggressive.
#
#	The difference in percentage points between the low- and
#	high-water marks represents an eviction rate of 300 objects per
#	second, and the rate continues to scale in aggressiveness by
#	multiples of this above the high-water mark.
#
#	Defaults are 90% and 95%. If you have a large cache, 5% could be
#	hundreds of MB. If this is the case you may wish to set these
#	numbers closer together.
#
#	See also cache_swap_high and cache_replacement_policy
#Default:
# cache_swap_low 90

#  TAG: cache_swap_high	(percent, 0-100)
#	The high-water mark for AUFS/UFS/diskd cache object eviction by
#	the cache_replacement_policy algorithm.
#
#	Removal begins when the swap (disk) usage of a cache_dir is
#	above the low-water mark set by cache_swap_low and attempts to
#	maintain utilization near the low-water mark.
#
#	As swap utilization increases towards this high-water mark,
#	object eviction becomes more aggressive.
#
#	The difference in percentage points between the low- and
#	high-water marks represents an eviction rate of 300 objects per
#	second, and the rate continues to scale in aggressiveness by
#	multiples of this above the high-water mark.
#
#	Defaults are 90% and 95%. If you have a large cache, 5% could be
#	hundreds of MB. If this is the case you may wish to set these
#	numbers closer together.
#
#	See also cache_swap_low and cache_replacement_policy
#Default:
# cache_swap_high 95
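
# Example (sketch): on a very large cache, pull the watermarks closer
# together as suggested above.
#cache_swap_low 93
#cache_swap_high 95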

# LOGFILE OPTIONS
# -----------------------------------------------------------------------------

#  TAG: logformat
#	Usage:
#
#	logformat <name> <format specification>
#
#	Defines an access log format.
#
#	The <format specification> is a string with embedded % format codes
#
#	% format codes all follow the same basic structure where all but
#	the formatcode is optional. Output strings are automatically escaped
#	as required according to their context and the output format
#	modifiers are usually not needed, but can be specified if an explicit
#	output format is desired.
#
#		% ["|[|'|#] [-] [[0]width] [{argument}] formatcode
#
#		"	output in quoted string format
#		[	output in squid text log format as used by log_mime_hdrs
#		#	output in URL quoted format
#		'	output as-is
#
#		-	left aligned
#
#		width	minimum and/or maximum field width:
#			    [width_min][.width_max]
#			When minimum starts with 0, the field is zero-padded.
#			String values exceeding maximum width are truncated.
#
#		{arg}	argument such as header name etc
#
#	Format codes:
#
#		%	a literal % character
#		sn	Unique sequence number per log line entry
#		err_code    The ID of an error response served by Squid or
#				a similar internal error identifier.
#		err_detail  Additional err_code-dependent error information.
#		note	The annotation specified by the argument. Also
#			logs the adaptation meta headers set by the
#			adaptation_meta configuration parameter.
#			If no argument is given, all annotations are logged.
#			The argument may include a separator to use with
#			annotation values:
#                            name[:separator]
#			By default, multiple note values are separated with ","
#			and multiple notes are separated with "\r\n".
#			When logging named notes with %{name}note, the
#			explicitly configured separator is used between note
#			values. When logging all notes with %note, the
#			explicitly configured separator is used between
#			individual notes. There is currently no way to
#			specify both value and notes separators when logging
#			all notes with %note.
#
#	Connection related format codes:
#
#		>a	Client source IP address
#		>A	Client FQDN
#		>p	Client source port
#		>eui	Client source EUI (MAC address, EUI-48 or EUI-64 identifier)
#		>la	Local IP address the client connected to
#		>lp	Local port number the client connected to
#		>qos    Client connection TOS/DSCP value set by Squid
#		>nfmark Client connection netfilter mark set by Squid
#
#		la	Local listening IP address the client connection was connected to.
#		lp	Local listening port number the client connection was connected to.
#
#		<a	Server IP address of the last server or peer connection
#		<A	Server FQDN or peer name
#		<p	Server port number of the last server or peer connection
#		<la	Local IP address of the last server or peer connection
#		<lp     Local port number of the last server or peer connection
#		<qos	Server connection TOS/DSCP value set by Squid
#		<nfmark Server connection netfilter mark set by Squid
#
#	Time related format codes:
#
#		ts	Seconds since epoch
#		tu	subsecond time (milliseconds)
#		tl	Local time. Optional strftime format argument
#				default %d/%b/%Y:%H:%M:%S %z
#		tg	GMT time. Optional strftime format argument
#				default %d/%b/%Y:%H:%M:%S %z
#		tr	Response time (milliseconds)
#		dt	Total time spent making DNS lookups (milliseconds)
#		tS	Approximate master transaction start time in 
#			<full seconds since epoch>.<fractional seconds> format.
#			Currently, Squid considers the master transaction
#			started when a complete HTTP request header initiating
#			the transaction is received from the client. This is
#			the same value that Squid uses to calculate transaction
#			response time when logging %tr to access.log. Currently,
#			Squid uses millisecond resolution for %tS values,
#			similar to the default access.log "current time" field
#			(%ts.%03tu).
#
#	Access Control related format codes:
#
#		et	Tag returned by external acl
#		ea	Log string returned by external acl
#		ul	User name from authentication
#		ue	User name from external acl helper
#		ui	User name from ident
#		un	A user name. Expands to the first available name
#			from the following list of information sources:
#			- authenticated user name, like %ul
#			- user name supplied by an external ACL, like %ue
#			- SSL client name, like %us
#			- ident user name, like %ui
#		credentials Client credentials. The exact meaning depends on
#			the authentication scheme: For Basic authentication,
#			it is the password; for Digest, the realm sent by the
#			client; for NTLM and Negotiate, the client challenge
#			or client credentials prefixed with "YR " or "KK ".
#
#	HTTP related format codes:
#
#	    REQUEST
#
#		[http::]rm	Request method (GET/POST etc)
#		[http::]>rm	Request method from client
#		[http::]<rm	Request method sent to server or peer
#		[http::]ru	Request URL from client (historic, filtered for logging)
#		[http::]>ru	Request URL from client
#		[http::]<ru	Request URL sent to server or peer
#		[http::]>rs	Request URL scheme from client
#		[http::]<rs	Request URL scheme sent to server or peer
#		[http::]>rd	Request URL domain from client
#		[http::]<rd	Request URL domain sent to server or peer
#		[http::]>rP	Request URL port from client
#		[http::]<rP	Request URL port sent to server or peer
#		[http::]rp	Request URL path excluding hostname
#		[http::]>rp	Request URL path excluding hostname from client
#		[http::]<rp	Request URL path excluding hostname sent to server or peer
#		[http::]rv	Request protocol version
#		[http::]>rv	Request protocol version from client
#		[http::]<rv	Request protocol version sent to server or peer
#
#		[http::]>h	Original received request header.
#				Usually differs from the request header sent by
#				Squid, although most fields are often preserved.
#				Accepts optional header field name/value filter
#				argument using name[:[separator]element] format.
#		[http::]>ha	Received request header after adaptation and
#				redirection (pre-cache REQMOD vectoring point).
#				Usually differs from the request header sent by
#				Squid, although most fields are often preserved.
#				Optional header name argument as for >h
#
#
#	    RESPONSE
#
#		[http::]<Hs	HTTP status code received from the next hop
#		[http::]>Hs	HTTP status code sent to the client
#
#		[http::]<h	Reply header. Optional header name argument
#				as for >h
#
#		[http::]mt	MIME content type
#
#
#	    SIZE COUNTERS
#
#		[http::]st	Total size of request + reply traffic with client
#		[http::]>st	Total size of request received from client.
#				Excluding chunked encoding bytes.
#		[http::]<st	Total size of reply sent to client (after adaptation)
#
#		[http::]>sh	Size of request headers received from client
#		[http::]<sh	Size of reply headers sent to client (after adaptation)
#
#		[http::]<sH	Reply high offset sent
#		[http::]<sS	Upstream object size
#
#		[http::]<bs	Number of HTTP-equivalent message body bytes 
#				received from the next hop, excluding chunked
#				transfer encoding and control messages.
#				Generated FTP/Gopher listings are treated as
#				received bodies.
#
#
#	    TIMING
#
#		[http::]<pt	Peer response time in milliseconds. The timer starts
#				when the last request byte is sent to the next hop
#				and stops when the last response byte is received.
#		[http::]<tt	Total time in milliseconds. The timer 
#				starts with the first connect request (or write I/O)
#				sent to the first selected peer. The timer stops
#				with the last I/O with the last peer.
#
#	Squid handling related format codes:
#
#		Ss	Squid request status (TCP_MISS etc)
#		Sh	Squid hierarchy status (DEFAULT_PARENT etc)
#
#	SSL-related format codes:
#
#		ssl::bump_mode	SslBump decision for the transaction:
#
#				For CONNECT requests that initiated bumping of
#				a connection and for any request received on
#				an already bumped connection, Squid logs the
#				corresponding SslBump mode ("server-first" or
#				"client-first"). See the ssl_bump option for
#				more information about these modes.
#
#				A "none" token is logged for requests that
#				triggered "ssl_bump" ACL evaluation matching
#				either a "none" rule or no rules at all.
#
#				In all other cases, a single dash ("-") is
#				logged.
#
#		ssl::>sni	SSL client SNI sent to Squid. Available only
#				after the peek, stare, or splice SSL bumping
#				actions.
#
#	If ICAP is enabled, the following code becomes available (as
#	well as ICAP log codes documented with the icap_log option):
#
#		icap::tt        Total ICAP processing time for the HTTP
#				transaction. The timer ticks when ICAP
#				ACLs are checked and when ICAP
#				transaction is in progress.
#
#	If adaptation is enabled the following three codes become available:
#
#		adapt::<last_h	The header of the last ICAP response or
#				meta-information from the last eCAP
#				transaction related to the HTTP transaction.
#				Like <h, accepts an optional header name
#				argument.
#
#		adapt::sum_trs Summed adaptation transaction response
#				times recorded as a comma-separated list in
#				the order of transaction start time. Each time
#				value is recorded as an integer number,
#				representing response time of one or more
#				adaptation (ICAP or eCAP) transaction in
#				milliseconds.  When a failed transaction is
#				being retried or repeated, its time is not
#				logged individually but added to the
#				replacement (next) transaction. See also:
#				adapt::all_trs.
#
#		adapt::all_trs All adaptation transaction response times.
#				Same as adapt::sum_trs but response times of
#				individual transactions are never added
#				together. Instead, all transaction response
#				times are recorded individually.
#
#	You can prefix adapt::*_trs format codes with adaptation
#	service name in curly braces to record response time(s) specific
#	to that service. For example: %{my_service}adapt::sum_trs
#
#	If SSL is enabled, the following formatting codes become available:
#
#		%ssl::>cert_subject The Subject field of the received client
#				SSL certificate or a dash ('-') if Squid has
#				received an invalid/malformed certificate or
#				no certificate at all. Consider encoding the
#				logged value because Subject often has spaces.
#
#		%ssl::>cert_issuer The Issuer field of the received client
#				SSL certificate or a dash ('-') if Squid has
#				received an invalid/malformed certificate or
#				no certificate at all. Consider encoding the
#				logged value because Issuer often has spaces.
#
#	The default formats available (which do not need re-defining) are:
#
#logformat squid      %ts.%03tu %6tr %>a %Ss/%03>Hs %<st %rm %ru %[un %Sh/%<a %mt
#logformat common     %>a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %<st %Ss:%Sh
#logformat combined   %>a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %<st "%{Referer}>h" "%{User-Agent}>h" %Ss:%Sh
#logformat referrer   %ts.%03tu %>a %{Referer}>h %ru
#logformat useragent  %>a [%tl] "%{User-Agent}>h"
#
#	NOTE: When the log_mime_hdrs directive is set to ON,
#		the squid, common and combined formats have a safely encoded copy
#		of the mime headers appended to each line within a pair of brackets.
#
#	NOTE: The common and combined formats are not quite true to the Apache definition.
#		The logs from Squid contain an extra status and hierarchy code appended.
#
#Default:
# The format definitions squid, common, combined, referrer, useragent are built in.
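
# Example (sketch): a custom format assembled from the codes above; the
# name "timing_detail" is arbitrary.
#logformat timing_detail %ts.%03tu %6tr %dt %>a %Ss/%03>Hs %<st %rm %ru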

#  TAG: access_log
#	Configures whether and how Squid logs HTTP and ICP transactions.
#	If access logging is enabled, a single line is logged for every 
#	matching HTTP or ICP request. The recommended directive formats are:
#
#	access_log <module>:<place> [option ...] [acl acl ...]
#	access_log none [acl acl ...]
#
#	The following directive format is accepted but may be deprecated:
#	access_log <module>:<place> [<logformat name> [acl acl ...]]
#
#	In most cases, the first ACL name must not contain the '=' character
#	and should not be equal to an existing logformat name. You can always
#	start with an 'all' ACL to work around those restrictions.
#	
#	Will log to the specified module:place using the specified format (which
#	must be defined in a logformat directive) those entries which match
#	ALL the acl's specified (which must be defined in acl clauses).
#	If no acl is specified, all requests will be logged to this destination.
#	
#	===== Available options for the recommended directive format =====
#
#	logformat=name		Names log line format (either built-in or
#				defined by a logformat directive). Defaults
#				to 'squid'.
#
#	buffer-size=64KB	Defines approximate buffering limit for log
#				records (see buffered_logs).  Squid should not
#				keep more than the specified size and, hence,
#				should flush records before the buffer becomes
#				full to avoid overflows under normal
#				conditions (the exact flushing algorithm is
#				module-dependent though).  The on-error option
#				controls overflow handling.
#
#	on-error=die|drop	Defines action on unrecoverable errors. The
#				'drop' action ignores (i.e., does not log)
#				affected log records. The default 'die' action
#				kills the affected worker. The drop action 
#				support has not been tested for modules other
#				than tcp.
#
#	===== Modules Currently available =====
#	
#	none	Do not log any requests matching these ACLs.
#		Do not specify Place or logformat name.
#	
#	stdio	Write each log line to disk immediately at the completion of
#		each request.
#		Place: the filename and path to be written.
#	
#	daemon	Very similar to stdio. But instead of writing to disk the log
#		line is passed to a daemon helper for asynchronous handling.
#		Place: varies depending on the daemon.
#		
#		log_file_daemon Place: the file name and path to be written.
#	
#	syslog	To log each request via syslog facility.
#		Place: The syslog facility and priority level for these entries.
#		Place Format:  facility.priority
#
#		where facility could be any of:
#			authpriv, daemon, local0 ... local7 or user.
#
#		And priority could be any of:
#			err, warning, notice, info, debug.
#	
#	udp	To send each log line as text data to a UDP receiver.
#		Place: The destination host name or IP and port.
#		Place Format:   //host:port
#
#	tcp	To send each log line as text data to a TCP receiver.
#		Lines may be accumulated before sending (see buffered_logs).
#		Place: The destination host name or IP and port.
#		Place Format:   //host:port
#
#	Default:
#		access_log daemon:/var/log/squid/access.log squid
#Default:
# access_log daemon:/var/log/squid/access.log squid
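
# Example (sketch; host and port are placeholders): ship log lines to a
# TCP receiver, buffering records and dropping them on unrecoverable errors.
#access_log tcp://loghost.example.com:5140 logformat=squid buffer-size=64KB on-error=drop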

#  TAG: icap_log
#	ICAP log files record ICAP transaction summaries, one line per
#	transaction.
#
#	The icap_log option format is:
#	icap_log <filepath> [<logformat name> [acl acl ...]]
#	icap_log none [acl acl ...]
#	
#	Please see access_log option documentation for details. The two
#	kinds of logs share the overall configuration approach and many
#	features.
#
#	ICAP processing of a single HTTP message or transaction may
#	require multiple ICAP transactions.  In such cases, multiple
#	ICAP transaction log lines will correspond to a single access
#	log line.
#
#	ICAP log uses logformat codes that make sense for an ICAP
#	transaction. Header-related codes are applied to the HTTP header
#	embedded in an ICAP server response, with the following caveats:
#	For REQMOD, there is no HTTP response header unless the ICAP
#	server performed request satisfaction. For RESPMOD, the HTTP
#	request header is the header sent to the ICAP server. For
#	OPTIONS, there are no HTTP headers.
#
#	The following format codes are also available for ICAP logs:
#
#		icap::<A	ICAP server IP address. Similar to <A.
#
#		icap::<service_name	ICAP service name from the icap_service
#				option in Squid configuration file.
#
#		icap::ru	ICAP Request-URI. Similar to ru.
#
#		icap::rm	ICAP request method (REQMOD, RESPMOD, or 
#				OPTIONS). Similar to existing rm.
#
#		icap::>st	Bytes sent to the ICAP server (TCP payload
#				only; i.e., what Squid writes to the socket).
#
#		icap::<st	Bytes received from the ICAP server (TCP
#				payload only; i.e., what Squid reads from
#				the socket).
#
#		icap::<bs	Number of message body bytes received from the
#				ICAP server. ICAP message body, if any, usually
#				includes encapsulated HTTP message headers and
#				possibly encapsulated HTTP message body. The
#				HTTP body part is dechunked before its size is
#				computed.
#
#		icap::tr 	Transaction response time (in
#				milliseconds).  The timer starts when
#				the ICAP transaction is created and
#				stops when the transaction is completed.
#				Similar to tr.
#
#		icap::tio	Transaction I/O time (in milliseconds). The
#				timer starts when the first ICAP request
#				byte is scheduled for sending. The timer
#				stops when the last byte of the ICAP response
#				is received.
#
#		icap::to 	Transaction outcome: ICAP_ERR* for all
#				transaction errors, ICAP_OPT for OPTION
#				transactions, ICAP_ECHO for 204
#				responses, ICAP_MOD for message
#				modification, and ICAP_SAT for request
#				satisfaction. Similar to Ss.
#
#		icap::Hs	ICAP response status code. Similar to Hs.
#
#		icap::>h	ICAP request header(s). Similar to >h.
#
#		icap::<h	ICAP response header(s). Similar to <h.
#
#	The default ICAP log format, which can be used without an explicit
#	definition, is called icap_squid:
#
#logformat icap_squid %ts.%03tu %6icap::tr %>a %icap::to/%03icap::Hs %icap::<size %icap::rm %icap::ru% %un -/%icap::<A -
#
#	See also: logformat, log_icap, and %adapt::<last_h 
#Default:
# none

#  TAG: logfile_daemon
#	Specify the path to the logfile-writing daemon. This daemon is
#	used to write the access and store logs, if configured.
#
#	Squid sends a number of commands to the log daemon:
#	  L<data>\n - logfile data
#	  R\n - rotate file
#	  T\n - truncate file
#	  O\n - reopen file
#	  F\n - flush file
#	  r<n>\n - set rotate count to <n>
#	  b<n>\n - 1 = buffer output, 0 = don't buffer output
#
#	No responses are expected.
#Default:
# logfile_daemon /usr/lib/squid/log_file_daemon

#  TAG: stats_collection	allow|deny acl acl...
#	This option allows you to control which requests get accounted
#	in performance counters.
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# Allow logging for all transactions.

#  TAG: cache_store_log
#	Logs the activities of the storage manager.  Shows which
#	objects are ejected from the cache, and which objects are
#	saved and for how long.
#	There are not really any utilities to analyze this data, so you can safely
#	disable it (the default).
#	
#	Store log uses modular logging outputs. See access_log for the list
#	of modules supported.
#	
#	Example:
#		cache_store_log stdio:/var/log/squid/store.log
#		cache_store_log daemon:/var/log/squid/store.log
#Default:
# none

#  TAG: cache_swap_state
#	Location for the cache "swap.state" file. This index file holds
#	the metadata of objects saved on disk.  It is used to rebuild
#	the cache during startup.  Normally this file resides in each
#	'cache_dir' directory, but you may specify an alternate
#	pathname here.  Note you must give a full filename, not just
#	a directory. Since this is the index for the whole object
#	list you CANNOT periodically rotate it!
#
#	If %s is used in the file name it will be replaced with a
#	representation of the cache_dir name, where each / is replaced
#	with '.'. This is needed to allow adding/removing cache_dir
#	lines when cache_swap_state is being used.
#
#	If you have more than one 'cache_dir' and %s is not used in the
#	name, these swap logs will have names such as:
#
#		cache_swap_log.00
#		cache_swap_log.01
#		cache_swap_log.02
#
#	The numbered extension (which is added automatically)
#	corresponds to the order of the 'cache_dir' lines in this
#	configuration file.  If you change the order of the 'cache_dir'
#	lines in this file, these index files will NOT correspond to
#	the correct 'cache_dir' entry (unless you manually rename
#	them).  We recommend you do NOT use this option.  It is
#	better to keep these index files in each 'cache_dir' directory.
#Default:
# Store the journal inside its cache_dir

#  TAG: logfile_rotate
#	Specifies the number of logfile rotations to make when you
#	type 'squid -k rotate'. The default is 10, which will rotate
#	with extensions 0 through 9. Setting logfile_rotate to 0 will
#	disable the file name rotation, but the logfiles are still closed
#	and re-opened. This will enable you to rename the logfiles
#	yourself just before sending the rotate signal.
#
#	Note, the 'squid -k rotate' command normally sends a USR1
#	signal to the running squid process.  In certain situations
#	(e.g. on Linux with Async I/O), USR1 is used for other
#	purposes, so -k rotate uses another signal.  It is best to get
#	in the habit of using 'squid -k rotate' instead of 'kill -USR1
#	<pid>'.
#
#	Note, from Squid-3.1 this option is only a default for cache.log,
#	that log can be rotated separately by using debug_options.
#
#	Note2, for Debian/Linux the default of logfile_rotate is
#	zero, since it includes external logfile-rotation methods.
#Default:
# logfile_rotate 0
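
# Example (sketch): keep 10 generations when running 'squid -k rotate'.
#logfile_rotate 10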

#  TAG: mime_table
#	Path to Squid's icon configuration file.
#
#	You shouldn't need to change this, but the default file contains
#	examples and formatting information if you do.
#Default:
# mime_table /usr/share/squid/mime.conf

#  TAG: log_mime_hdrs	on|off
#	The Cache can record both the request and the response MIME
#	headers for each HTTP transaction.  The headers are encoded
#	safely and will appear as two bracketed fields at the end of
#	the access log (for either the native or httpd-emulated log
#	formats).  To enable this logging set log_mime_hdrs to 'on'.
#Default:
# log_mime_hdrs off

#  TAG: pid_filename
#	A filename to write the process-id to.  To disable, enter "none".
#Default:
# pid_filename /var/run/squid.pid

#  TAG: client_netmask
#	A netmask for client addresses in logfiles and cachemgr output.
#	Change this to protect the privacy of your cache clients.
#	A netmask of 255.255.255.0 will log all IPs in that range with
#	the last octet set to '0'.
#Default:
# Log full client IP address
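
# Example (sketch): anonymize the last octet of logged client addresses.
#client_netmask 255.255.255.0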

#  TAG: strip_query_terms
#	By default, Squid strips query terms from requested URLs before
#	logging.  This protects your users' privacy and reduces log size.
#
#	When investigating HIT/MISS or other caching behaviour you
#	will need to disable this to see the full URL used by Squid.
#Default:
# strip_query_terms on

#  TAG: buffered_logs	on|off
#	Whether to write/send access_log records ASAP or accumulate them and
#	then write/send them in larger chunks. Buffering may improve
#	performance because it decreases the number of I/Os. However,
#	buffering increases the delay before log records become available to
#	the final recipient (e.g., a disk file or logging daemon) and,
#	hence, increases the risk of log record loss.
#
#	Note that even when buffered_logs are off, Squid may have to buffer
#	records if it cannot write/send them immediately due to pending I/Os
#	(e.g., the I/O writing the previous log record) or connectivity loss.
#
#	Currently honored by 'daemon' and 'tcp' access_log modules only.
#Default:
# buffered_logs off

#  TAG: netdb_filename
#	Where Squid stores its netdb journal.
#	When enabled this journal preserves netdb state between restarts.
#
#	To disable, enter "none".
#Default:
# netdb_filename stdio:/var/log/squid/netdb.state

# OPTIONS FOR TROUBLESHOOTING
# -----------------------------------------------------------------------------

#  TAG: cache_log
#	Squid administrative logging file.
#
#	This is where general information about Squid behavior goes. You can
#	increase the amount of data logged to this file and how often it is
#	rotated with "debug_options"
#Default:
# cache_log /var/log/squid/cache.log

#  TAG: debug_options
#	Logging options are set as section,level where each source file
#	is assigned a unique section.  Lower levels result in less
#	output.  Full debugging (level 9) can result in a very large
#	log file, so be careful.
#
#	The magic word "ALL" sets debugging levels for all sections.
#	The default is to run with "ALL,1" to record important warnings.
#
#	The rotate=N option can be used to keep more or less of these logs
#	than would otherwise be kept by logfile_rotate.
#	For most uses a single log should be enough to monitor current
#	events affecting Squid.
#Default:
# Log all critical and important messages.
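
# Example (sketch; the section number is an assumption, check the
# debug-sections list shipped with your Squid): extra detail for one
# section, keeping two rotations of cache.log.
#debug_options ALL,1 28,3 rotate=2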

#  TAG: coredump_dir
#	By default Squid leaves core files in the directory from where
#	it was started. If you set 'coredump_dir' to a directory
#	that exists, Squid will chdir() to that directory at startup
#	and coredump files will be left there.
#
#Default:
# Use the directory from where Squid was started.
#

# Leave coredumps in the first cache dir
coredump_dir /var/spool/squid

# OPTIONS FOR FTP GATEWAYING
# -----------------------------------------------------------------------------

#  TAG: ftp_user
#	If you want the anonymous login password to be more informative
#	(and enable the use of picky FTP servers), set this to something
#	reasonable for your domain, like wwwuser@somewhere.net
#
#	The reason why this is domainless by default is the
#	request can be made on the behalf of a user in any domain,
#	depending on how the cache is used.
#	Some FTP servers also validate that the email address is valid
#	(for example perl.com).
#Default:
# ftp_user Squid@

#  TAG: ftp_passive
#	If your firewall does not allow Squid to use passive
#	connections, turn off this option.
#
#	Use of ftp_epsv_all option requires this to be ON.
#Default:
# ftp_passive on

#  TAG: ftp_epsv_all
#	FTP Protocol extensions permit the use of a special "EPSV ALL" command.
#
#	NATs may be able to put the connection on a "fast path" through the
#	translator, as the EPRT command will never be used and therefore,
#	translation of the data portion of the segments will never be needed.
#
#	When a client only expects to do two-way FTP transfers this may be
#	useful.
#	If Squid finds that it must do a three-way FTP transfer after issuing
#	an EPSV ALL command, the FTP session will fail.
#
#	If you have any doubts about this option do not use it.
#	Squid will nicely attempt all other connection methods.
#
#	Requires ftp_passive to be ON (default) for any effect.
#Default:
# ftp_epsv_all off

#  TAG: ftp_epsv
#	FTP Protocol extensions permit the use of a special "EPSV" command.
#
#	NATs may be able to put the connection on a "fast path" through the
#	translator using EPSV, as the EPRT command will never be used
#	and therefore, translation of the data portion of the segments 
#	will never be needed.
#
#	EPSV is often required to interoperate with FTP servers on IPv6
#	networks. On the other hand, it may break some IPv4 servers.
#
#	By default, Squid may try EPSV with any FTP server. To fine tune
#	that decision, you may restrict EPSV to certain clients or servers
#	using ACLs:
#
#		ftp_epsv allow|deny acl1 acl2 ...
#
#	WARNING: Disabling EPSV may cause problems with external NAT and IPv6.
#
#	Only fast ACLs are supported.
#	Requires ftp_passive to be ON (default) for any effect.
#Default:
# none

#  TAG: ftp_eprt
#	FTP Protocol extensions permit the use of a special "EPRT" command.
#
#	This extension provides a protocol neutral alternative to the
#	IPv4-only PORT command. When supported it enables active FTP data
#	channels over IPv6 and efficient NAT handling.
#
#	Turning this OFF will prevent EPRT being attempted and will skip
#	straight to using PORT for IPv4 servers.
#
#	Some devices are known to not handle this extension correctly and
#	may result in crashes. Devices which support EPRT enough to fail
#	cleanly will result in Squid attempting PORT anyway. This directive
#	should only be disabled when EPRT results in device failures.
#
#	WARNING: Doing so will convert Squid back to the old behavior with all
#	the related problems with external NAT devices/layers and IPv4-only FTP.
#Default:
# ftp_eprt on

#  TAG: ftp_sanitycheck
#	For security and data integrity reasons Squid by default performs
#	sanity checks of the addresses of FTP data connections to ensure the
#	data connection is to the requested server. If you need to allow
#	FTP connections to servers using another IP address for the data
#	connection turn this off.
#Default:
# ftp_sanitycheck on

#  TAG: ftp_telnet_protocol
#	The FTP protocol is officially defined to use the telnet protocol
#	as transport channel for the control connection. However, many
#	implementations are broken and do not respect this aspect of
#	the FTP protocol.
#
#	If you have trouble accessing files with ASCII code 255 in the
#	path or similar problems involving this ASCII code you can
#	try setting this directive to off. If that helps, report to the
#	operator of the FTP server in question that their FTP server
#	is broken and does not follow the FTP standard.
#Default:
# ftp_telnet_protocol on

# OPTIONS FOR EXTERNAL SUPPORT PROGRAMS
# -----------------------------------------------------------------------------

#  TAG: diskd_program
#	Specify the location of the diskd executable.
#	Note this is only useful if you have compiled in
#	diskd as one of the store io modules.
#Default:
# diskd_program /usr/lib/squid/diskd

#  TAG: unlinkd_program
#	Specify the location of the executable for file deletion process.
#Default:
# unlinkd_program /usr/lib/squid/unlinkd

#  TAG: pinger_program
#	Specify the location of the executable for the pinger process.
#Default:
# pinger_program /usr/lib/squid/pinger

#  TAG: pinger_enable
#	Control whether the pinger is active at run-time.
#	Enables turning ICMP pinger on and off with a simple
#	squid -k reconfigure.
#Default:
# pinger_enable on

# OPTIONS FOR URL REWRITING
# -----------------------------------------------------------------------------

#  TAG: url_rewrite_program
#	Specify the location of the executable URL rewriter to use.
#	Since they can perform almost any function there isn't one included.
#
#	For each requested URL, the rewriter will receive one line with the format
#
#	  [channel-ID <SP>] URL [<SP> extras]<NL>
#
#	See url_rewrite_extras on how to send "extras" with optional values to
#	the helper.
#	After processing the request the helper must reply using the following format:
#
#	  [channel-ID <SP>] result [<SP> kv-pairs]
#
#	The result code can be:
#
#	  OK status=30N url="..."
#		Redirect the URL to the one supplied in 'url='.
#		'status=' is optional and contains the status code to send
#		the client in Squid's HTTP response. It must be one of the
#		HTTP redirect status codes: 301, 302, 303, 307, 308.
#		When no status is given Squid will use 302.
#
#	  OK rewrite-url="..."
#		Rewrite the URL to the one supplied in 'rewrite-url='.
#		The new URL is fetched directly by Squid and returned to
#		the client as the response to its request.
#
#	  OK
#		When neither of url= and rewrite-url= are sent Squid does
#		not change the URL.
#
#	  ERR
#		Do not change the URL.
#
#	  BH
#		An internal error occurred in the helper, preventing
#		a result being identified. The 'message=' key name is
#		reserved for delivering a log message.
#
#
#	In addition to the above kv-pairs Squid also understands the following
#	optional kv-pairs received from URL rewriters:
#	  clt_conn_tag=TAG
#		Associates a TAG with the client TCP connection.
#		The TAG is treated as a regular annotation but persists across
#		future requests on the client connection rather than just the
#		current request. A helper may update the TAG during subsequent
#		requests by returning a new kv-pair.
#
#	When using the concurrency= option the protocol is changed by
#	introducing a query channel tag in front of the request/response.
#	The query channel tag is a number between 0 and concurrency-1.
#	This value must be echoed back unchanged to Squid as the first part
#	of the response relating to its request.
#
#	WARNING: URL re-writing ability should be avoided whenever possible.
#		 Use the URL redirect form of response instead.
#
#	Re-writing creates a difference in the state held by the client
#	and server, possibly causing confusion when the server response
#	contains snippets of its view of that state. Embedded URLs,
#	response and content Location headers, etc. are not re-written
#	by this interface.
#
#	By default, a URL rewriter is not used.
#Default:
# none

#  TAG: url_rewrite_children
#	The maximum number of redirector processes to spawn. If you limit
#	it to too few, Squid will have to wait for them to process a backlog
#	of URLs, slowing it down. If you allow too many, they will use RAM
#	and other system resources noticeably.
#	
#	The startup= and idle= options allow some measure of skew in your
#	tuning.
#	
#		startup=
#	
#	Sets a minimum of how many processes are to be spawned when Squid
#	starts or reconfigures. When set to zero the first request will
#	cause spawning of the first child process to handle it.
#	
#	Starting too few will cause an initial slowdown in traffic as Squid
#	attempts to simultaneously spawn enough processes to cope.
#	
#		idle=
#	
#	Sets a minimum of how many processes Squid is to try and keep available
#	at all times. When traffic begins to rise above what the existing
#	processes can handle this many more will be spawned up to the maximum
#	configured. A minimum setting of 1 is required.
#
#		concurrency=
#
#	The number of requests each redirector helper can handle in
#	parallel. Defaults to 0, which indicates the redirector
#	is an old-style single-threaded redirector.
#
#	When this directive is set to a value >= 1 then the protocol
#	used to communicate with the helper is modified to include
#	an ID in front of the request/response. The ID from the request
#	must be echoed back with the response to that request.
#Default:
# url_rewrite_children 20 startup=0 idle=1 concurrency=0
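
# Example (sketch; numbers are placeholders for a busy proxy):
#url_rewrite_children 40 startup=10 idle=5 concurrency=10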

#  TAG: url_rewrite_host_header
#	To preserve same-origin security policies in browsers and
#	prevent Host: header forgery by redirectors Squid rewrites
#	any Host: header in redirected requests.
#	
#	If you are running an accelerator this may not be a wanted
#	effect of a redirector. This directive enables you to disable
#	Host: alteration in reverse-proxy traffic.
#	
#	WARNING: Entries are cached on the result of the URL rewriting
#	process, so be careful if you have domain-virtual hosts.
#	
#	WARNING: Squid and other software verifies the URL and Host
#	are matching, so be careful not to relay through other proxies
#	or inspecting firewalls with this disabled.
#Default:
# url_rewrite_host_header on

#  TAG: url_rewrite_access
#	If defined, this access list specifies which requests are
#	sent to the redirector processes.
#
#	This clause supports both fast and slow acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# Allow, unless rules exist in squid.conf.
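
# Example (sketch): keep CONNECT tunnels away from the rewriter; the ACL
# definition is shown inline for completeness.
#acl rewrite_connect method CONNECT
#url_rewrite_access deny rewrite_connect
#url_rewrite_access allow all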

#  TAG: url_rewrite_bypass
#	When this is 'on', a request will not go through the
#	redirector if all the helpers are busy.  If this is 'off'
#	and the redirector queue grows too large, Squid will exit
#	with a FATAL error and ask you to increase the number of
#	redirectors.  You should only enable this if the redirectors
#	are not critical to your caching system.  If you use
#	redirectors for access control, and you enable this option,
#	users may have access to pages they should not
#	be allowed to request.
#Default:
# url_rewrite_bypass off

#  TAG: url_rewrite_extras
#	Specifies a string to be appended to the request line format for
#	the rewriter helper. "Quoted" format values may contain spaces and
#	logformat %macros. In theory, any logformat %macro can be used.
#	In practice, a %macro expands as a dash (-) if the helper request is
#	sent before the required macro information is available to Squid.
#Default:
# url_rewrite_extras "%>a/%>A %un %>rm myip=%la myport=%lp"

# OPTIONS FOR STORE ID
# -----------------------------------------------------------------------------

#  TAG: store_id_program
#	Specify the location of the executable StoreID helper to use.
#	Since they can perform almost any function there isn't one included.
#
#	For each requested URL, the helper will receive one line with the format
#
#	  [channel-ID <SP>] URL [<SP> extras]<NL>
#
#
#	After processing the request the helper must reply using the following format:
#
#	  [channel-ID <SP>] result [<SP> kv-pairs]
#
#	The result code can be:
#
#	  OK store-id="..."
#		Use the StoreID supplied in 'store-id='.
#
#	  ERR
#		The default is to use HTTP request URL as the store ID.
#
#	  BH
#		An internal error occurred in the helper, preventing
#		a result being identified.
#
#	In addition to the above kv-pairs Squid also understands the following
#	optional kv-pairs received from URL rewriters:
#	  clt_conn_tag=TAG
#		Associates a TAG with the client TCP connection.
#		Please see url_rewrite_program related documentation for this
#		kv-pair
#
#	Helper programs should be prepared to receive and possibly ignore
#	additional whitespace-separated tokens on each input line.
#
#	When using the concurrency= option the protocol is changed by
#	introducing a query channel tag in front of the request/response.
#	The query channel tag is a number between 0 and concurrency-1.
#	This value must be echoed back unchanged to Squid as the first part
#	of the response relating to its request.
#
#	NOTE: when using StoreID refresh_pattern will apply to the StoreID
#	      returned from the helper and not the URL.
#
#	WARNING: Wrong StoreID value returned by a careless helper may result
#	         in the wrong cached response returned to the user.
#
#	By default, a StoreID helper is not used.
#Default:
# none

#  TAG: store_id_extras
#        Specifies a string to be appended to the request line format for
#        the StoreId helper. "Quoted" format values may contain spaces and
#        logformat %macros. In theory, any logformat %macro can be used.
#        In practice, a %macro expands as a dash (-) if the helper request is
#        sent before the required macro information is available to Squid.
#Default:
# store_id_extras "%>a/%>A %un %>rm myip=%la myport=%lp"

#  TAG: store_id_children
#	The maximum number of StoreID helper processes to spawn. If you limit
#	it to too few, Squid will have to wait for them to process a backlog
#	of requests, slowing it down. If you allow too many, they will use RAM
#	and other system resources noticeably.
#	
#	The startup= and idle= options allow some measure of skew in your
#	tuning.
#	
#		startup=
#	
#	Sets a minimum of how many processes are to be spawned when Squid
#	starts or reconfigures. When set to zero the first request will
#	cause spawning of the first child process to handle it.
#	
#	Starting too few will cause an initial slowdown in traffic as Squid
#	attempts to simultaneously spawn enough processes to cope.
#	
#		idle=
#	
#	Sets a minimum of how many processes Squid is to try and keep available
#	at all times. When traffic begins to rise above what the existing
#	processes can handle this many more will be spawned up to the maximum
#	configured. A minimum setting of 1 is required.
#
#		concurrency=
#
#	The number of requests each storeID helper can handle in
#	parallel. Defaults to 0, which indicates the helper
#	is an old-style single-threaded program.
#
#	When this directive is set to a value >= 1 then the protocol
#	used to communicate with the helper is modified to include
#	an ID in front of the request/response. The ID from the request
#	must be echoed back with the response to that request.
#Default:
# store_id_children 20 startup=0 idle=1 concurrency=0

#  TAG: store_id_access
#	If defined, this access list specifies which requests are
#	sent to the StoreID processes.  By default all requests
#	are sent.
#
#	This clause supports both fast and slow acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# Allow, unless rules exist in squid.conf.

#  TAG: store_id_bypass
#	When this is 'on', a request will not go through the
#	helper if all helpers are busy.  If this is 'off'
#	and the helper queue grows too large, Squid will exit
#	with a FATAL error and ask you to increase the number of
#	helpers.  You should only enable this if the helpers
#	are not critical to your caching system.  If you use
#	helpers for critical caching components, and you enable this
#	option, users may not get objects from cache.
#Default:
# store_id_bypass on

# OPTIONS FOR TUNING THE CACHE
# -----------------------------------------------------------------------------

#  TAG: cache
#	Requests denied by this directive will not be served from the cache
#	and their responses will not be stored in the cache. This directive
#	has no effect on other transactions and on already cached responses.
#
#	This clause supports both fast and slow acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
#	This and the two other similar caching directives listed below are
#	checked at different transaction processing stages, have different
#	access to response information, affect different cache operations,
#	and differ in slow ACLs support:
#
#	* cache: Checked before Squid makes a hit/miss determination.
#		No access to reply information!
#		Denies both serving a hit and storing a miss.
#		Supports both fast and slow ACLs.
#	* send_hit: Checked after a hit was detected.
#		Has access to reply (hit) information.
#		Denies serving a hit only.
#		Supports fast ACLs only.
#	* store_miss: Checked before storing a cachable miss.
#		Has access to reply (miss) information.
#		Denies storing a miss only.
#		Supports fast ACLs only.
#
#	If you are not sure which of the three directives to use, apply the
#	following decision logic:
#
#	* If your ACL(s) are of slow type _and_ need response info, redesign.
#	  Squid does not support that particular combination at this time.
#        Otherwise:
#	* If your directive ACL(s) are of slow type, use "cache"; and/or
#	* if your directive ACL(s) need no response info, use "cache".
#        Otherwise:
#	* If you do not want the response cached, use store_miss; and/or
#	* if you do not want a hit on a cached response, use send_hit.
#Default:
# By default, this directive is unused and has no effect.
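
# Example (sketch; the domain is a placeholder): never serve or store
# cached responses for a frequently-changing internal site.
#acl nocache_sites dstdomain .internal.example.com
#cache deny nocache_sites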

#  TAG: send_hit
#	Responses denied by this directive will not be served from the cache
#	(but may still be cached, see store_miss). This directive has no
#	effect on the responses it allows and on the cached objects.
#
#	Please see the "cache" directive for a summary of differences among
#	store_miss, send_hit, and cache directives.
#
#	Unlike the "cache" directive, send_hit only supports fast acl
#	types.  See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
#	For example:
#
#		# apply custom Store ID mapping to some URLs
#		acl MapMe dstdomain .c.example.com
#		store_id_program ...
#		store_id_access allow MapMe
#
#		# but prevent caching of special responses
#		# such as 302 redirects that cause StoreID loops
#		acl Ordinary http_status 200-299
#		store_miss deny MapMe !Ordinary
#
#		# and do not serve any previously stored special responses
#		# from the cache (in case they were already cached before
#		# the above store_miss rule was in effect).
#		send_hit deny MapMe !Ordinary
#Default:
# By default, this directive is unused and has no effect.

#  TAG: store_miss
#	Responses denied by this directive will not be cached (but may still
#	be served from the cache, see send_hit). This directive has no
#	effect on the responses it allows and on the already cached responses.
#
#	Please see the "cache" directive for a summary of differences among
#	store_miss, send_hit, and cache directives. See the
#	send_hit directive for a usage example.
#
#	Unlike the "cache" directive, store_miss only supports fast acl
#	types.  See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# By default, this directive is unused and has no effect.

#  TAG: max_stale	time-units
#	This option puts an upper limit on how stale content Squid
#	will serve from the cache if cache validation fails.
#	Can be overridden by the refresh_pattern max-stale option.
#Default:
# max_stale 1 week

#  TAG: refresh_pattern
#	usage: refresh_pattern [-i] regex min percent max [options]
#
#	By default, regular expressions are CASE-SENSITIVE.  To make
#	them case-insensitive, use the -i option.
#
#	'Min' is the time (in minutes) an object without an explicit
#	expiry time should be considered fresh. The recommended
#	value is 0; any higher values may cause dynamic applications
#	to be erroneously cached unless the application designer
#	has taken the appropriate actions.
#
#	'Percent' is a percentage of the object's age (the time since
#	last modification) for which an object without an explicit
#	expiry time will be considered fresh.
#
#	'Max' is an upper limit on how long objects without an explicit
#	expiry time will be considered fresh.
#
#	options: override-expire
#		 override-lastmod
#		 reload-into-ims
#		 ignore-reload
#		 ignore-no-store
#		 ignore-must-revalidate
#		 ignore-private
#		 ignore-auth
#		 max-stale=NN
#		 refresh-ims
#		 store-stale
#
#		override-expire enforces min age even if the server
#		sent an explicit expiry time (e.g., with the
#		Expires: header or Cache-Control: max-age). Doing this
#		VIOLATES the HTTP standard.  Enabling this feature
#		could make you liable for problems which it causes.
#
#		Note: override-expire does not enforce staleness - it only extends
#	freshness / min. If the server returns an Expires time which
#		is longer than your max time, Squid will still consider
#		the object fresh for that period of time.
#
#		override-lastmod enforces min age even on objects
#		that were modified recently.
#
#		reload-into-ims changes a client no-cache or ``reload''
#		request for a cached entry into a conditional request using
#		If-Modified-Since and/or If-None-Match headers, provided the
#		cached entry has a Last-Modified and/or a strong ETag header.
#		Doing this VIOLATES the HTTP standard. Enabling this feature
#		could make you liable for problems which it causes.
#
#		ignore-reload ignores a client no-cache or ``reload''
#		header. Doing this VIOLATES the HTTP standard. Enabling
#		this feature could make you liable for problems which
#		it causes.
#
#		ignore-no-store ignores any ``Cache-control: no-store''
#		headers received from a server. Doing this VIOLATES
#		the HTTP standard. Enabling this feature could make you
#		liable for problems which it causes.
#
#		ignore-must-revalidate ignores any ``Cache-Control: must-revalidate``
#		headers received from a server. Doing this VIOLATES
#		the HTTP standard. Enabling this feature could make you
#		liable for problems which it causes.
#
#		ignore-private ignores any ``Cache-control: private''
#		headers received from a server. Doing this VIOLATES
#		the HTTP standard. Enabling this feature could make you
#		liable for problems which it causes.
#
#		ignore-auth caches responses to requests with authorization,
#	as if the origin server had sent ``Cache-control: public''
#		in the response header. Doing this VIOLATES the HTTP standard.
#		Enabling this feature could make you liable for problems which
#		it causes.
#
#		refresh-ims causes squid to contact the origin server
#		when a client issues an If-Modified-Since request. This
#		ensures that the client will receive an updated version
#		if one is available.
#
#		store-stale stores responses even if they don't have explicit 
#		freshness or a validator (i.e., Last-Modified or an ETag) 
#		present, or if they're already stale. By default, Squid will 
#		not cache such responses because they usually can't be
#		reused. Note that such responses will be stale by default.
#
#	max-stale=NN provides a maximum staleness factor. Squid won't
#		serve objects more stale than this even if it failed to
#		validate the object. Default: use the max_stale global limit.
#
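#	For example (a hypothetical rule; pattern and values are
#	illustrative only), combining several of the options above:
#
#		refresh_pattern -i \.cab$ 4320 80% 43200 reload-into-ims max-stale=60
#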
#	Basically a cached object is:
#
#		FRESH if expire > now, else STALE
#		STALE if age > max
#		FRESH if lm-factor < percent, else STALE
#		FRESH if age < min
#		else STALE
#
#	The refresh_pattern lines are checked in the order listed here.
#	The first entry which matches is used.  If none of the entries
#	match the default will be used.
#
#	Note, you must uncomment all the default lines if you want
#	to change one. The default settings are only active if no
#	refresh_pattern lines are configured.
#
#

#
# Add any of your own refresh_pattern entries above these.
#
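# For example (hypothetical, commented out): cache static images for up to
# a day when the server provides no explicit expiry information:
#refresh_pattern -i \.(gif|png|jpe?g)$	0	20%	1440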
refresh_pattern ^ftp:		1440	20%	10080
refresh_pattern ^gopher:	1440	0%	1440
refresh_pattern -i (/cgi-bin/|\?) 0	0%	0
refresh_pattern (Release|Packages(.gz)*)$      0       20%     2880
# example line for deb packages
#refresh_pattern (\.deb|\.udeb)$   129600 100% 129600
refresh_pattern .		0	20%	4320

#  TAG: quick_abort_min	(KB)
#Default:
# quick_abort_min 16 KB

#  TAG: quick_abort_max	(KB)
#Default:
# quick_abort_max 16 KB

#  TAG: quick_abort_pct	(percent)
#	The cache by default continues downloading aborted requests
#	which are almost completed (less than 16 KB remaining). This
#	may be undesirable on slow (e.g. SLIP) links and/or very busy
#	caches.  Impatient users may tie up file descriptors and
#	bandwidth by repeatedly requesting and immediately aborting
#	downloads.
#
#	When the user aborts a request, Squid compares the quick_abort
#	values against the amount of data transferred up to that
#	point.
#
#	If the transfer has less than 'quick_abort_min' KB remaining,
#	it will finish the retrieval.
#
#	If the transfer has more than 'quick_abort_max' KB remaining,
#	it will abort the retrieval.
#
#	If more than 'quick_abort_pct' of the transfer has completed,
#	it will finish the retrieval.
#
#	If you do not want any retrieval to continue after the client
#	has aborted, set both 'quick_abort_min' and 'quick_abort_max'
#	to '0 KB'.
#
#	If you want retrievals to always continue if they are being
#	cached set 'quick_abort_min' to '-1 KB'.
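#
#	For example (hypothetical tuning), to never continue a retrieval
#	once the client has aborted:
#
#		quick_abort_min 0 KB
#		quick_abort_max 0 KB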
#Default:
# quick_abort_pct 95

#  TAG: read_ahead_gap	buffer-size
#	The amount of data the cache will buffer ahead of what has been
#	sent to the client when retrieving an object from another server.
#Default:
# read_ahead_gap 16 KB

#  TAG: negative_ttl	time-units
#	Set the Default Time-to-Live (TTL) for failed requests.
#	Certain types of failures (such as "connection refused" and
#	"404 Not Found") are able to be negatively-cached for a short time.
#	Modern web servers should provide an Expires: header; however,
#	if they do not, this can provide a minimum TTL.
#	The default is not to cache errors with unknown expiry details.
#
#	Note that this is different from negative caching of DNS lookups.
#
#	WARNING: Doing this VIOLATES the HTTP standard.  Enabling
#	this feature could make you liable for problems which it
#	causes.
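#
#	Example (hypothetical value; use with care given the warning above):
#		negative_ttl 30 seconds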
#Default:
# negative_ttl 0 seconds

#  TAG: positive_dns_ttl	time-units
#	Upper limit on how long Squid will cache positive DNS responses.
#	Default is 6 hours (360 minutes). This directive must be set
#	larger than negative_dns_ttl.
#Default:
# positive_dns_ttl 6 hours

#  TAG: negative_dns_ttl	time-units
#	Time-to-Live (TTL) for negative caching of failed DNS lookups.
#	This also sets the lower cache limit on positive lookups.
#	The minimum value is 1 second, and it is not recommended to go
#	much below 10 seconds.
#Default:
# negative_dns_ttl 1 minutes

#  TAG: range_offset_limit	size [acl acl...]
#	usage: (size) [units] [[!]aclname]
#	
#	Sets an upper limit on how far (number of bytes) into the file 
#	a Range request	may be to cause Squid to prefetch the whole file. 
#	If beyond this limit, Squid forwards the Range request as it is and 
#	the result is NOT cached.
#	
#	This is to stop a far-ahead range request (let's say starting at 17MB)
#	from making Squid fetch the whole object up to that point before
#	sending anything to the client.
#	
#	Multiple range_offset_limit lines may be specified, and they will 
#	be searched from top to bottom on each request until a match is found. 
#	The first match found will be used.  If no line matches a request, the 
#	default limit of 0 bytes will be used.
#	
#	'size' is the limit specified as a number of units.
#	
#	'units' specifies whether to use bytes, KB, MB, etc.
#	If no units are specified, bytes are assumed.
#	
#	A size of 0 causes Squid to never fetch more than the
#	client requested. (default)
#	
#	A size of 'none' causes Squid to always fetch the object from the
#	beginning so it may cache the result. (2.0 style)
#	
#	'aclname' is the name of a defined ACL.
#	
#	NP: Using 'none' as the byte value here will override any quick_abort settings 
#	    that may otherwise apply to the range request. The range request will
#	    be fully fetched from start to finish regardless of the client
#	    actions. This affects bandwidth usage.
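#
#	For example (hypothetical ACL), prefetch whole ISO images so they
#	can be cached, while leaving all other range requests untouched:
#
#		acl isos url_regex -i \.iso$
#		range_offset_limit none isos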
#Default:
# none

#  TAG: minimum_expiry_time	(seconds)
#	The minimum caching time according to (Expires - Date)
#	headers Squid honors if the object can't be revalidated.
#	The default is 60 seconds.
#
#	In reverse proxy environments it might be desirable to honor
#	shorter object lifetimes. It is most likely better to make
#	your server return a meaningful Last-Modified header however.
#
#	In ESI environments where page fragments often have short
#	lifetimes, this will often be best set to 0.
#Default:
# minimum_expiry_time 60 seconds

#  TAG: store_avg_object_size	(bytes)
#	Average object size, used to estimate number of objects your
#	cache can hold.  The default is 13 KB.
#
#	This is used to pre-seed the cache index memory allocation to
#	reduce expensive reallocate operations while handling client
#	traffic. Too-large values may result in memory allocation during
#	peak traffic; too-small values will result in wasted memory.
#
#	Check the cache manager 'info' report metrics for the real
#	object sizes seen by your Squid before tuning this.
#Default:
# store_avg_object_size 13 KB

#  TAG: store_objects_per_bucket
#	Target number of objects per bucket in the store hash table.
#	Lowering this value increases the total number of buckets and
#	also the storage maintenance rate.  The default is 20.
#Default:
# store_objects_per_bucket 20

# HTTP OPTIONS
# -----------------------------------------------------------------------------

#  TAG: request_header_max_size	(KB)
#	This specifies the maximum size for HTTP headers in a request.
#	Request headers are usually relatively small (about 512 bytes).
#	Placing a limit on the request header size will catch certain
#	bugs (for example with persistent connections) and possibly
#	buffer-overflow or denial-of-service attacks.
#Default:
# request_header_max_size 64 KB

#  TAG: reply_header_max_size	(KB)
#	This specifies the maximum size for HTTP headers in a reply.
#	Reply headers are usually relatively small (about 512 bytes).
#	Placing a limit on the reply header size will catch certain
#	bugs (for example with persistent connections) and possibly
#	buffer-overflow or denial-of-service attacks.
#Default:
# reply_header_max_size 64 KB

#  TAG: request_body_max_size	(bytes)
#	This specifies the maximum size for an HTTP request body.
#	In other words, the maximum size of a PUT/POST request.
#	A user who attempts to send a request with a body larger
#	than this limit receives an "Invalid Request" error message.
#	If you set this parameter to a zero (the default), there will
#	be no limit imposed.
#
#	See also client_request_buffer_max_size for an alternative
#	limitation on client uploads which can be configured.
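#
#	Example (hypothetical limit):
#		request_body_max_size 10 MB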
#Default:
# No limit.

#  TAG: client_request_buffer_max_size	(bytes)
#	This specifies the maximum buffer size of a client request.
#	It prevents Squid from eating too much memory when somebody uploads
#	a large file.
#Default:
# client_request_buffer_max_size 512 KB

#  TAG: broken_posts
#	A list of ACL elements which, if matched, causes Squid to send
#	an extra CRLF pair after the body of a PUT/POST request.
#
#	Some HTTP servers have broken implementations of PUT/POST,
#	and rely on an extra CRLF pair sent by some WWW clients.
#
#	Quote from RFC2616 section 4.1 on this matter:
#
#	  Note: certain buggy HTTP/1.0 client implementations generate
#	  extra CRLF's after a POST request. To restate what is explicitly
#	  forbidden by the BNF, an HTTP/1.1 client must not preface or follow
#	  a request with an extra CRLF.
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
#Example:
# acl buggy_server url_regex ^http://....
# broken_posts allow buggy_server
#Default:
# Obey RFC 2616.

#  TAG: adaptation_uses_indirect_client	on|off
#	Controls whether the indirect client IP address (instead of the direct
#	client IP address) is passed to adaptation services.
#
#	See also: follow_x_forwarded_for adaptation_send_client_ip
#Default:
# adaptation_uses_indirect_client on

#  TAG: via	on|off
#	If set (default), Squid will include a Via header in requests and
#	replies as required by RFC2616.
#Default:
# via on

#  TAG: ie_refresh	on|off
#	Microsoft Internet Explorer up until version 5.5 Service
#	Pack 1 has an issue with transparent proxies, wherein it
#	is impossible to force a refresh.  Turning this on provides
#	a partial fix to the problem, by causing all IMS-REFRESH
#	requests from older IE versions to check the origin server
#	for fresh content.  This reduces hit ratio by some amount
#	(~10% in my experience), but allows users to actually get
#	fresh content when they want it.  Note that because Squid
#	cannot tell if the user is using 5.5 or 5.5SP1, the behavior
#	of 5.5 is unchanged from old versions of Squid (i.e. a
#	forced refresh is impossible).  Newer versions of IE will,
#	hopefully, continue to have the new behavior and will be
#	handled based on that assumption.  This option defaults to
#	the old Squid behavior, which is better for hit ratios but
#	worse for clients using IE, if they need to be able to
#	force fresh content.
#Default:
# ie_refresh off

#  TAG: vary_ignore_expire	on|off
#	Many HTTP servers supporting Vary give such objects an
#	immediate expiry time with no cache-control header when
#	requested by an HTTP/1.0 client. This option enables Squid
#	to ignore such expiry times until HTTP/1.1 is fully
#	implemented.
#
#	WARNING: If turned on this may eventually cause some
#	varying objects not intended for caching to get cached.
#Default:
# vary_ignore_expire off

#  TAG: request_entities
#	Squid defaults to deny GET and HEAD requests with request entities,
#	as the meaning of such requests is undefined in the HTTP standard
#	even if not explicitly forbidden.
#
#	Set this directive to on if you have clients which insist
#	on sending request entities in GET or HEAD requests. But be warned
#	that there is server software (both proxies and web servers) which
#	can fail to properly process this kind of request which may make you
#	vulnerable to cache pollution attacks if enabled.
#Default:
# request_entities off

#  TAG: request_header_access
#	Usage: request_header_access header_name allow|deny [!]aclname ...
#
#	WARNING: Doing this VIOLATES the HTTP standard.  Enabling
#	this feature could make you liable for problems which it
#	causes.
#
#	This option replaces the old 'anonymize_headers' and the
#	older 'http_anonymizer' option with something that is much
#	more configurable. A list of ACLs for each header name allows
#	removal of specific header fields under specific conditions.
#
#	This option only applies to outgoing HTTP request headers (i.e.,
#	headers sent by Squid to the next HTTP hop such as a cache peer
#	or an origin server). The option has no effect during cache hit
#	detection. The equivalent adaptation vectoring point in ICAP
#	terminology is post-cache REQMOD.
#
#	The option is applied to individual outgoing request header
#	fields. For each request header field F, Squid uses the first
#	qualifying set of request_header_access rules:
#
#	    1. Rules with header_name equal to F's name.
#	    2. Rules with header_name 'Other', provided F's name is not
#	       on the hard-coded list of commonly used HTTP header names.
#	    3. Rules with header_name 'All'.
#
#	Within that qualifying rule set, rule ACLs are checked as usual.
#	If ACLs of an "allow" rule match, the header field is allowed to
#	go through as is. If ACLs of a "deny" rule match, the header is
#	removed and request_header_replace is then checked to identify
#	if the removed header has a replacement. If no rules within the
#	set have matching ACLs, the header field is left as is.
#
#	For example, to achieve the same behavior as the old
#	'http_anonymizer standard' option, you should use:
#
#		request_header_access From deny all
#		request_header_access Referer deny all
#		request_header_access User-Agent deny all
#
#	Or, to reproduce the old 'http_anonymizer paranoid' feature
#	you should use:
#
#		request_header_access Authorization allow all
#		request_header_access Proxy-Authorization allow all
#		request_header_access Cache-Control allow all
#		request_header_access Content-Length allow all
#		request_header_access Content-Type allow all
#		request_header_access Date allow all
#		request_header_access Host allow all
#		request_header_access If-Modified-Since allow all
#		request_header_access Pragma allow all
#		request_header_access Accept allow all
#		request_header_access Accept-Charset allow all
#		request_header_access Accept-Encoding allow all
#		request_header_access Accept-Language allow all
#		request_header_access Connection allow all
#		request_header_access All deny all
#
#	HTTP reply headers are controlled with the reply_header_access directive.
#
#	By default, all headers are allowed (no anonymizing is performed).
#Default:
# No limits.

#  TAG: reply_header_access
#	Usage: reply_header_access header_name allow|deny [!]aclname ...
#
#	WARNING: Doing this VIOLATES the HTTP standard.  Enabling
#	this feature could make you liable for problems which it
#	causes.
#
#	This option only applies to reply headers, i.e., from the
#	server to the client.
#
#	This is the same as request_header_access, but in the other
#	direction. Please see request_header_access for detailed
#	documentation.
#
#	For example, to achieve the same behavior as the old
#	'http_anonymizer standard' option, you should use:
#
#		reply_header_access Server deny all
#		reply_header_access WWW-Authenticate deny all
#		reply_header_access Link deny all
#
#	Or, to reproduce the old 'http_anonymizer paranoid' feature
#	you should use:
#
#		reply_header_access Allow allow all
#		reply_header_access WWW-Authenticate allow all
#		reply_header_access Proxy-Authenticate allow all
#		reply_header_access Cache-Control allow all
#		reply_header_access Content-Encoding allow all
#		reply_header_access Content-Length allow all
#		reply_header_access Content-Type allow all
#		reply_header_access Date allow all
#		reply_header_access Expires allow all
#		reply_header_access Last-Modified allow all
#		reply_header_access Location allow all
#		reply_header_access Pragma allow all
#		reply_header_access Content-Language allow all
#		reply_header_access Retry-After allow all
#		reply_header_access Title allow all
#		reply_header_access Content-Disposition allow all
#		reply_header_access Connection allow all
#		reply_header_access All deny all
#
#	HTTP request headers are controlled with the request_header_access directive.
#
#	By default, all headers are allowed (no anonymizing is
#	performed).
#Default:
# No limits.

#  TAG: request_header_replace
#	Usage:   request_header_replace header_name message
#	Example: request_header_replace User-Agent Nutscrape/1.0 (CP/M; 8-bit)
#
#	This option allows you to change the contents of headers
#	denied with request_header_access above, by replacing them
#	with some fixed string.
#
#	This only applies to request headers, not reply headers.
#
#	By default, headers are removed if denied.
#Default:
# none

#  TAG: reply_header_replace
#	Usage:   reply_header_replace header_name message
#	Example: reply_header_replace Server Foo/1.0
#
#	This option allows you to change the contents of headers
#	denied with reply_header_access above, by replacing them
#	with some fixed string.
#
#	This only applies to reply headers, not request headers.
#
#	By default, headers are removed if denied.
#Default:
# none

#  TAG: request_header_add
#	Usage:   request_header_add field-name field-value acl1 [acl2] ...
#	Example: request_header_add X-Client-CA "CA=%ssl::>cert_issuer" all
#
#	This option adds header fields to outgoing HTTP requests (i.e.,
#	request headers sent by Squid to the next HTTP hop such as a
#	cache peer or an origin server). The option has no effect during
#	cache hit detection. The equivalent adaptation vectoring point
#	in ICAP terminology is post-cache REQMOD.
#
#	Field-name is a token specifying an HTTP header name. If a
#	standard HTTP header name is used, Squid does not check whether
#	the new header conflicts with any existing headers or violates
#	HTTP rules. If the request to be modified already contains a
#	field with the same name, the old field is preserved but the
#	header field values are not merged.
#
#	Field-value is either a token or a quoted string. If quoted
#	string format is used, then the surrounding quotes are removed
#	while escape sequences and %macros are processed.
#
#	In theory, all of the logformat codes can be used as %macros.
#	However, unlike logging (which happens at the very end of
#	transaction lifetime), the transaction may not yet have enough
#	information to expand a macro when the new header value is needed.
#	And some information may already be available to Squid but not yet
#	committed where the macro expansion code can access it (report
#	such instances!). The macro will be expanded into a single dash
#	('-') in such cases. Not all macros have been tested.
#
#	One or more Squid ACLs may be specified to restrict header
#	injection to matching requests. As always in squid.conf, all
#	ACLs in an option ACL list must be satisfied for the insertion
#	to happen. The request_header_add option supports fast ACLs
#	only.
#Default:
# none

#  TAG: note
#	This option is used to log custom information about the master
#	transaction. For example, an admin may configure Squid to log
#	which "user group" the transaction belongs to, where "user group"
#	will be determined based on a set of ACLs and not [just]
#	authentication information.
#	Values of key/value pairs can be logged using %{key}note macros:
#
#	    note key value acl ...
#	    logformat myFormat ... %{key}note ...
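#
#	For example (hypothetical ACL, key, and format names):
#
#	    acl Admins src 10.0.0.0/24
#	    note group admins Admins
#	    logformat withGroup %ts.%03tu %>a %{group}note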
#Default:
# none

#  TAG: relaxed_header_parser	on|off|warn
#	In the default "on" setting Squid accepts certain forms
#	of non-compliant HTTP messages where it is unambiguous
#	what the sending application intended even if the message
#	is not correctly formatted. The message is then normalized
#	to the correct form when forwarded by Squid.
#
#	If set to "warn" then a warning will be emitted in cache.log
#	each time such an HTTP error is encountered.
#
#	If set to "off" then such HTTP errors will cause the request
#	or response to be rejected.
#Default:
# relaxed_header_parser on

#  TAG: collapsed_forwarding	(on|off)
#       This option controls whether Squid is allowed to merge multiple
#       potentially cachable requests for the same URI before Squid knows
#       whether the response is going to be cachable.
#
#       This feature is disabled by default: Enabling collapsed forwarding
#       needlessly delays forwarding requests that look cachable (when they are
#       collapsed) but then need to be forwarded individually anyway because
#       they end up being for uncachable content. However, in some cases, such
#       as acceleration of highly cachable content with periodic or grouped
#       expiration times, the gains from collapsing [large volumes of
#       simultaneous refresh requests] outweigh losses from such delays.
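#
#       Example (only worthwhile in accelerator setups like the above):
#               collapsed_forwarding on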
#Default:
# collapsed_forwarding off

# TIMEOUTS
# -----------------------------------------------------------------------------

#  TAG: forward_timeout	time-units
#	This parameter specifies the maximum amount of time Squid will
#	spend trying to find a forwarding path for a request before
#	giving up.
#Default:
# forward_timeout 4 minutes

#  TAG: connect_timeout	time-units
#	This parameter specifies how long to wait for the TCP connect to
#	the requested server or peer to complete before Squid should
#	attempt to find another path where to forward the request.
#Default:
# connect_timeout 1 minute

#  TAG: peer_connect_timeout	time-units
#	This parameter specifies how long to wait for a pending TCP
#	connection to a peer cache.  The default is 30 seconds.   You
#	may also set different timeout values for individual neighbors
#	with the 'connect-timeout' option on a 'cache_peer' line.
#Default:
# peer_connect_timeout 30 seconds

#  TAG: read_timeout	time-units
#	Applied on peer server connections.
#
#	After each successful read(), the timeout will be extended by this
#	amount.  If no data is read again after this amount of time,
#	the request is aborted and logged with ERR_READ_TIMEOUT.
#
#	The default is 15 minutes.
#Default:
# read_timeout 15 minutes

#  TAG: write_timeout	time-units
#	This timeout is tracked for all connections that have data
#	available for writing and are waiting for the socket to become
#	ready. After each successful write, the timeout is extended by
#	the configured amount. If Squid has data to write but the
#	connection is not ready for the configured duration, the
#	transaction associated with the connection is terminated. The
#	default is 15 minutes.
#Default:
# write_timeout 15 minutes

#  TAG: request_timeout
#	How long to wait for complete HTTP request headers after initial
#	connection establishment.
#Default:
# request_timeout 5 minutes

#  TAG: client_idle_pconn_timeout
#	How long to wait for the next HTTP request on a persistent
#	client connection after the previous request completes.
#Default:
# client_idle_pconn_timeout 2 minutes

#  TAG: ftp_client_idle_timeout
#	How long to wait for an FTP request on a connection to Squid ftp_port.
#	Many FTP clients do not deal with idle connection closures well,
#	necessitating a longer default timeout than client_idle_pconn_timeout
#	used for incoming HTTP requests.
#Default:
# ftp_client_idle_timeout 30 minutes

#  TAG: client_lifetime	time-units
#	The maximum amount of time a client (browser) is allowed to
#	remain connected to the cache process.  This protects the Cache
#	from having a lot of sockets (and hence file descriptors) tied up
#	in a CLOSE_WAIT state from remote clients that go away without
#	properly shutting down (either because of a network failure or
#	because of a poor client implementation).  The default is one
#	day, 1440 minutes.
#
#	NOTE:  The default value is intended to be much larger than any
#	client would ever need to be connected to your cache.  You
#	should probably change client_lifetime only as a last resort.
#	If you seem to have many client connections tying up
#	file descriptors, we recommend first tuning the read_timeout,
#	request_timeout, persistent_request_timeout and quick_abort values.
#Default:
# client_lifetime 1 day

#  TAG: half_closed_clients
#	Some clients may shut down the sending side of their TCP
#	connections, while leaving their receiving sides open.	Sometimes,
#	Squid can not tell the difference between a half-closed and a
#	fully-closed TCP connection.
#
#	By default, Squid will immediately close client connections when
#	read(2) returns "no more data to read."
#
#	Change this option to 'on' and Squid will keep open connections
#	until a read(2) or write(2) on the socket returns an error.
#	This may show some benefits for reverse proxies, but if not,
#	it is recommended to leave it off.
#Default:
# half_closed_clients off

#  TAG: server_idle_pconn_timeout
#	Timeout for idle persistent connections to servers and other
#	proxies.
#Default:
# server_idle_pconn_timeout 1 minute

#  TAG: ident_timeout
#	Maximum time to wait for IDENT lookups to complete.
#
#	If this is too high, and you enabled IDENT lookups from untrusted
#	users, you might be susceptible to denial-of-service by having
#	many ident requests going at once.
#Default:
# ident_timeout 10 seconds

#  TAG: shutdown_lifetime	time-units
#	When SIGTERM or SIGHUP is received, the cache is put into
#	"shutdown pending" mode until all active sockets are closed.
#	This value is the lifetime to set for all open descriptors
#	during shutdown mode.  Any active clients after this many
#	seconds will receive a 'timeout' message.
#Default:
# shutdown_lifetime 30 seconds

# ADMINISTRATIVE PARAMETERS
# -----------------------------------------------------------------------------

#  TAG: cache_mgr
#	Email-address of local cache manager who will receive
#	mail if the cache dies.  The default is "webmaster".
#Default:
# cache_mgr webmaster

#  TAG: mail_from
#	From: email-address for mail sent when the cache dies.
#	The default is to use 'squid@unique_hostname'.
#
#	See also: unique_hostname directive.
#Default:
# none

#  TAG: mail_program
#	Email program used to send mail if the cache dies.
#	The default is "mail". The specified program must comply
#	with the standard Unix mail syntax:
#	  mail-program recipient < mailfile
#
#	Optional command line options can be specified.
#Default:
# mail_program mail

#  TAG: cache_effective_user
#	If you start Squid as root, it will change its effective/real
#	UID/GID to the user specified below.  The default is to change
#	to the UID of 'proxy'.
#	See also: cache_effective_group
#Default:
# cache_effective_user proxy

#  TAG: cache_effective_group
#	Squid sets the GID to the effective user's default group ID
#	(taken from the password file) and supplementary group list
#	from the groups membership.
#
#	If you want Squid to run with a specific GID regardless of
#	the group memberships of the effective user then set this
#	to the group (or GID) you want Squid to run as. When set
#	all other group privileges of the effective user are ignored
#	and only this GID is effective. If Squid is not started as
#	root the user starting Squid MUST be member of the specified
#	group.
#
#	This option is not recommended by the Squid Team.
#	Our preference is for administrators to configure a secure
#	user account for squid with UID/GID matching system policies.
#Default:
# Use system group memberships of the cache_effective_user account

#  TAG: httpd_suppress_version_string	on|off
#	Suppress Squid version string info in HTTP headers and HTML error pages.
#Default:
# httpd_suppress_version_string off

#  TAG: visible_hostname
#	If you want to present a special hostname in error messages, etc,
#	define this.  Otherwise, the return value of gethostname()
#	will be used. If you have multiple caches in a cluster and
#	get errors about IP-forwarding, you must set them to have individual
#	names with this setting.
#Default:
# Automatically detect the system host name

#  TAG: unique_hostname
#	If you want to have multiple machines with the same
#	'visible_hostname' you must give each machine a different
#	'unique_hostname' so forwarding loops can be detected.
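#
#	Example (hypothetical names) for two caches sharing one public name:
#
#		visible_hostname proxy.example.com
#		unique_hostname proxy1.example.com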
#Default:
# Copy the value from visible_hostname

#  TAG: hostname_aliases
#	A list of other DNS names your cache has.
#Default:
# none

#  TAG: umask
#	Minimum umask which should be enforced while the proxy
#	is running, in addition to the umask set at startup.
#
#	For a traditional octal representation of umasks, start
#	your value with 0.
#Default:
# umask 027

# OPTIONS FOR THE CACHE REGISTRATION SERVICE
# -----------------------------------------------------------------------------
#
#	This section contains parameters for the (optional) cache
#	announcement service.  This service is provided to help
#	cache administrators locate one another in order to join or
#	create cache hierarchies.
#
#	An 'announcement' message is sent (via UDP) to the registration
#	service by Squid.  By default, the announcement message is NOT
#	SENT unless you enable it with 'announce_period' below.
#
#	The announcement message includes your hostname, plus the
#	following information from this configuration file:
#
#		http_port
#		icp_port
#		cache_mgr
#
#	All current information is processed regularly and made
#	available on the Web at http://www.ircache.net/Cache/Tracker/.

#  TAG: announce_period
#	This is how frequently to send cache announcements.
#
#	To enable announcing your cache, just set an announce period.
#
#	Example:
#		announce_period 1 day
#Default:
# Announcement messages disabled.

#  TAG: announce_host
#	Set the hostname where announce registration messages will be sent.
#
#	See also announce_port and announce_file
#Default:
# announce_host tracker.ircache.net

#  TAG: announce_file
#	The contents of this file will be included in the announce
#	registration messages.
#Default:
# none

#  TAG: announce_port
#	Set the port where announce registration messages will be sent.
#
#	See also announce_host and announce_file
#Default:
# announce_port 3131

# HTTPD-ACCELERATOR OPTIONS
# -----------------------------------------------------------------------------

#  TAG: httpd_accel_surrogate_id
#	Surrogates (http://www.esi.org/architecture_spec_1.0.html)
#	need an identification token to allow control targeting. Because
#	a farm of surrogates may all perform the same tasks, they may share
#	an identification token.
#Default:
# visible_hostname is used if no specific ID is set.

#  TAG: http_accel_surrogate_remote	on|off
#	Remote surrogates (such as those in a CDN) honour the header
#	"Surrogate-Control: no-store-remote".
#
#	Set this to on to have squid behave as a remote surrogate.
#Default:
# http_accel_surrogate_remote off

#  TAG: esi_parser	libxml2|expat|custom
#	ESI markup is not strictly XML compatible. The custom ESI parser
#	will give higher performance, but cannot handle non ASCII character
#	encodings.
#Default:
# esi_parser custom

# DELAY POOL PARAMETERS
# -----------------------------------------------------------------------------

#  TAG: delay_pools
#	This represents the number of delay pools to be used.  For example,
#	if you have one class 2 delay pool and one class 3 delays pool, you
#	have a total of 2 delay pools.
#
#	See also delay_parameters, delay_class, delay_access for pool
#	configuration details.
#Default:
# delay_pools 0

#  TAG: delay_class
#	This defines the class of each delay pool.  There must be exactly one
#	delay_class line for each delay pool.  For example, to define four
#	delay pools, one each of classes 2 through 5, the settings above
#	and here would be:
#
#	Example:
#	    delay_pools 4      # 4 delay pools
#	    delay_class 1 2    # pool 1 is a class 2 pool
#	    delay_class 2 3    # pool 2 is a class 3 pool
#	    delay_class 3 4    # pool 3 is a class 4 pool
#	    delay_class 4 5    # pool 4 is a class 5 pool
#
#	The delay pool classes are:
#
#		class 1		Everything is limited by a single aggregate
#				bucket.
#
#		class 2 	Everything is limited by a single aggregate
#				bucket as well as an "individual" bucket chosen
#				from bits 25 through 32 of the IPv4 address.
#
#		class 3		Everything is limited by a single aggregate
#				bucket as well as a "network" bucket chosen
#				from bits 17 through 24 of the IPv4 address and an
#				"individual" bucket chosen from bits 17 through
#				32 of the IPv4 address.
#
#		class 4		Everything in a class 3 delay pool, with an
#				additional limit on a per user basis. This
#				only takes effect if the username is established
#				in advance - by forcing authentication in your
#				http_access rules.
#
#		class 5		Requests are grouped according to their tag (see
#				external_acl's tag= reply).
#
#
#	Each pool also requires a delay_parameters directive to configure the pool size
#	and speed limits used whenever the pool is applied to a request,
#	along with a set of delay_access directives to determine when it
#	is used.
#
#	NOTE: If an IP address is a.b.c.d
#		-> bits 25 through 32 are "d"
#		-> bits 17 through 24 are "c"
#		-> bits 17 through 32 are "c * 256 + d"
#
#	NOTE-2: Due to the use of bitmasks, class 2, 3 and 4 pools only
#		apply to IPv4 traffic. Class 1 and 5 pools may be used
#		with IPv6 traffic.
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
#	See also delay_parameters and delay_access.
#Default:
# none

#  TAG: delay_access
#	This is used to determine which delay pool a request falls into.
#
#	delay_access is sorted per pool and the matching starts with pool 1,
#	then pool 2, ..., and finally pool N. The first delay pool where the
#	request is allowed is selected for the request. If it does not allow
#	the request to any pool then the request is not delayed (default).
#
#	For example, if you want some_big_clients in delay
#	pool 1 and lotsa_little_clients in delay pool 2:
#
#		delay_access 1 allow some_big_clients
#		delay_access 1 deny all
#		delay_access 2 allow lotsa_little_clients
#		delay_access 2 deny all
#		delay_access 3 allow authenticated_clients
#
#	See also delay_parameters and delay_class.
#
#Default:
# Deny using the pool, unless allow rules exist in squid.conf for the pool.

#  TAG: delay_parameters
#	This defines the parameters for a delay pool.  Each delay pool has
#	a number of "buckets" associated with it, as explained in the
#	description of delay_class.
#
#	For a class 1 delay pool, the syntax is:
#		delay_class pool 1
#		delay_parameters pool aggregate
#
#	For a class 2 delay pool:
#		delay_class pool 2
#		delay_parameters pool aggregate individual
#
#	For a class 3 delay pool:
#		delay_class pool 3
#		delay_parameters pool aggregate network individual
#
#	For a class 4 delay pool:
#		delay_class pool 4
#		delay_parameters pool aggregate network individual user
#
#	For a class 5 delay pool:
#		delay_class pool 5
#		delay_parameters pool tagrate
#
#	The option variables are:
#
#		pool		a pool number - ie, a number between 1 and the
#				number specified in delay_pools as used in
#				delay_class lines.
#
#		aggregate	the speed limit parameters for the aggregate bucket
#				(class 1, 2, 3).
#
#		individual	the speed limit parameters for the individual
#				buckets (class 2, 3).
#
#		network		the speed limit parameters for the network buckets
#				(class 3).
#
#		user		the speed limit parameters for the user buckets
#				(class 4).
#
#		tagrate		the speed limit parameters for the tag buckets
#				(class 5).
#
#	A pair of delay parameters is written restore/maximum, where restore is
#	the number of bytes (not bits - modem and network speeds are usually
#	quoted in bits) per second placed into the bucket, and maximum is the
#	maximum number of bytes which can be in the bucket at any time.
#
#	There must be one delay_parameters line for each delay pool.
#
#
#	For example, if delay pool number 1 is a class 2 delay pool as in the
#	above example, and is being used to strictly limit each host to 64Kbit/sec
#	(plus overheads), with no overall limit, the line is:
#
#		delay_parameters 1 none 8000/8000
#
#	Note that 8 x 8K Byte/sec -> 64K bit/sec.
#
#	Note that the word 'none' is used to represent no limit.
#
#
#	And, if delay pool number 2 is a class 3 delay pool as in the above
#	example, and you want to limit it to a total of 256Kbit/sec (strict limit)
#	with each 8-bit network permitted 64Kbit/sec (strict limit) and each
#	individual host permitted 4800bit/sec with a bucket maximum size of 64Kbits
#	to permit a decent web page to be downloaded at a decent speed
#	(if the network is not being limited due to overuse) but slow down
#	large downloads more significantly:
#
#		delay_parameters 2 32000/32000 8000/8000 600/8000
#
#	Note that 8 x  32K Byte/sec ->  256K bit/sec.
#		  8 x   8K Byte/sec ->   64K bit/sec.
#		  8 x 600  Byte/sec -> 4800  bit/sec.
#
#
#	Finally, for a class 4 delay pool as in the example - each user will
#	be limited to 128Kbits/sec no matter how many workstations they are logged into:
#
#		delay_parameters 4 32000/32000 8000/8000 600/64000 16000/16000
#
#
#	See also delay_class and delay_access.
#
#Default:
# none

#  TAG: delay_initial_bucket_level	(percent, 0-100)
#	The initial bucket percentage is used to determine how much is put
#	in each bucket when squid starts, is reconfigured, or first notices
#	a host accessing it (in class 2 and class 3, individual hosts and
#	networks only have buckets associated with them once they have been
#	"seen" by squid).
#Default:
# delay_initial_bucket_level 50

# CLIENT DELAY POOL PARAMETERS
# -----------------------------------------------------------------------------

#  TAG: client_delay_pools
#	This option specifies the number of client delay pools used. It must
#	precede other client_delay_* options.
#
#	Example:
#		client_delay_pools 2
#
#	See also client_delay_parameters and client_delay_access.
#Default:
# client_delay_pools 0

#  TAG: client_delay_initial_bucket_level	(percent, 0-no_limit)
#	This option determines the initial bucket size as a percentage of
#	max_bucket_size from client_delay_parameters. Buckets are created
#	at the time of the "first" connection from the matching IP. Idle
#	buckets are periodically deleted.
#
#	You can specify more than 100 percent but note that such "oversized"
#	buckets are not refilled until their size goes down to max_bucket_size
#	from client_delay_parameters.
#
#	Example:
#		client_delay_initial_bucket_level 50
#Default:
# client_delay_initial_bucket_level 50

#  TAG: client_delay_parameters
#
#	This option configures client-side bandwidth limits using the
#	following format:
#
#	    client_delay_parameters pool speed_limit max_bucket_size
#
#	pool is an integer ID used for client_delay_access matching.
#
#	speed_limit is bytes added to the bucket per second.
#
#	max_bucket_size is the maximum size of a bucket, enforced after any
#	speed_limit additions.
#
#	Please see the delay_parameters option for more information and
#	examples.
#
#	Example:
#		client_delay_parameters 1 1024 2048
#		client_delay_parameters 2 51200 16384
#
#	See also client_delay_access.
#
#Default:
# none

#  TAG: client_delay_access
#	This option determines the client-side delay pool for the
#	request:
#
#	    client_delay_access pool_ID allow|deny acl_name
#
#	All client_delay_access options are checked in their pool ID
#	order, starting with pool 1. The first checked pool that allows
#	the request is selected for the request. If no ACL matches or there
#	are no client_delay_access options, the request bandwidth is not
#	limited.
#
#	The ACL-selected pool is then used to find the
#	client_delay_parameters for the request. Client-side pools are
#	not used to aggregate clients. Clients are always aggregated
#	based on their source IP addresses (one bucket per source IP).
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#	Additionally, only the client TCP connection details are available.
#	ACLs testing HTTP properties will not work.
#
#	Please see delay_access for more examples.
#
#	Example:
#		client_delay_access 1 allow low_rate_network
#		client_delay_access 2 allow vips_network
#
#
#	See also client_delay_parameters and client_delay_pools.
#Default:
# Deny use of the pool, unless allow rules exist in squid.conf for the pool.

# WCCPv1 AND WCCPv2 CONFIGURATION OPTIONS
# -----------------------------------------------------------------------------

#  TAG: wccp_router
#	Use this option to define your WCCP ``home'' router for
#	Squid.
#
#	wccp_router supports a single WCCP(v1) router
#
#	wccp2_router supports multiple WCCPv2 routers
#
#	Only one of the two may be used at the same time and defines
#	which version of WCCP to use.
#Default:
# WCCP disabled.

#  TAG: wccp2_router
#	Use this option to define your WCCP ``home'' router for
#	Squid.
#
#	wccp_router supports a single WCCP(v1) router
#
#	wccp2_router supports multiple WCCPv2 routers
#
#	Only one of the two may be used at the same time and defines
#	which version of WCCP to use.
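#
#	Example (hypothetical router address):
#		wccp2_router 192.0.2.1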
#Default:
# WCCPv2 disabled.

#  TAG: wccp_version
#	This directive is only relevant if you need to set up WCCP(v1)
#	to some very old and end-of-life Cisco routers. In all other
#	setups it must be left unset or at the default setting.
#	It defines an internal version in the WCCP(v1) protocol,
#	with version 4 being the officially documented protocol.
#
#	According to some users, Cisco IOS 11.2 and earlier only
#	support WCCP version 3.  If you're using that or an earlier
#	version of IOS, you may need to change this value to 3, otherwise
#	do not specify this parameter.
#Default:
# wccp_version 4

#  TAG: wccp2_rebuild_wait
#	If this is enabled, Squid will wait for the cache dir rebuild to
#	finish before sending the first wccp2 HereIAm packet.
#Default:
# wccp2_rebuild_wait on

#  TAG: wccp2_forwarding_method
#	WCCP2 allows the setting of forwarding methods between the
#	router/switch and the cache.  Valid values are as follows:
#
#	gre - GRE encapsulation (forward the packet in a GRE/WCCP tunnel)
#	l2  - L2 redirect (forward the packet using Layer 2/MAC rewriting)
#
#	Currently (as of IOS 12.4) cisco routers only support GRE.
#	Cisco switches only support the L2 redirect assignment method.
#Default:
# wccp2_forwarding_method gre

#  TAG: wccp2_return_method
#	WCCP2 allows the setting of return methods between the
#	router/switch and the cache for packets that the cache
#	decides not to handle.  Valid values are as follows:
#
#	gre - GRE encapsulation (forward the packet in a GRE/WCCP tunnel)
#	l2  - L2 redirect (forward the packet using Layer 2/MAC rewriting)
#
#	Currently (as of IOS 12.4) cisco routers only support GRE.
#	Cisco switches only support the L2 redirect assignment.
#
#	If the "ip wccp redirect exclude in" command has been
#	enabled on the cache interface, then it is still safe for
#	the proxy server to use a l2 redirect method even if this
#	option is set to GRE.
#Default:
# wccp2_return_method gre

#  TAG: wccp2_assignment_method
#	WCCP2 allows the setting of methods to assign the WCCP hash.
#	Valid values are as follows:
#
#	hash - Hash assignment
#	mask - Mask assignment
#
#	As a general rule, cisco routers support the hash assignment method
#	and cisco switches support the mask assignment method.
#Default:
# wccp2_assignment_method hash

#  TAG: wccp2_service
#	WCCP2 allows for multiple traffic services. There are two
#	types: "standard" and "dynamic". The standard type defines
#	one service id - http (id 0). The dynamic service ids can be from
#	51 to 255 inclusive.  In order to use a dynamic service id
#	one must define the type of traffic to be redirected; this is done
#	using the wccp2_service_info option.
#
#	The "standard" type does not require a wccp2_service_info option,
#	just specifying the service id will suffice.
#
#	MD5 service authentication can be enabled by adding
#	"password=<password>" to the end of this service declaration.
#
#	Examples:
#
#	wccp2_service standard 0	# for the 'web-cache' standard service
#	wccp2_service dynamic 80	# a dynamic service type which will be
#					# fleshed out with subsequent options.
#	wccp2_service standard 0 password=foo
#Default:
# Use the 'web-cache' standard service.

#  TAG: wccp2_service_info
#	Dynamic WCCPv2 services require further information to define the
#	traffic you wish to have diverted.
#
#	The format is:
#
#	wccp2_service_info <id> protocol=<protocol> flags=<flag>,<flag>..
#	    priority=<priority> ports=<port>,<port>..
#
#	The relevant WCCPv2 flags:
#	+ src_ip_hash, dst_ip_hash
#	+ source_port_hash, dst_port_hash
#	+ src_ip_alt_hash, dst_ip_alt_hash
#	+ src_port_alt_hash, dst_port_alt_hash
#	+ ports_source
#
#	The port list can be one to eight entries.
#
#	Example:
#
#	wccp2_service_info 80 protocol=tcp flags=src_ip_hash,ports_source
#	    priority=240 ports=80
#
#	Note: the service id must have been defined by a previous
#	'wccp2_service dynamic <id>' entry.
#Default:
# none

#  TAG: wccp2_weight
#	Each cache server is assigned a share of the destination
#	hash proportional to its weight.
#Default:
# wccp2_weight 10000

#  TAG: wccp_address
#	Use this option if you require WCCP to use a specific
#	interface address.
#
#	The default behavior is to not bind to any specific address.
#Default:
# Address selected by the operating system.

#  TAG: wccp2_address
#	Use this option if you require WCCPv2 to use a specific
#	interface address.
#
#	The default behavior is to not bind to any specific address.
#Default:
# Address selected by the operating system.

# PERSISTENT CONNECTION HANDLING
# -----------------------------------------------------------------------------
#
# Also see "pconn_timeout" in the TIMEOUTS section

#  TAG: client_persistent_connections
#	Persistent connection support for clients.
#	Squid uses persistent connections (when allowed). You can use
#	this option to disable persistent connections with clients.
#Default:
# client_persistent_connections on

#  TAG: server_persistent_connections
#	Persistent connection support for servers.
#	Squid uses persistent connections (when allowed). You can use
#	this option to disable persistent connections with servers.
#Default:
# server_persistent_connections on

#  TAG: persistent_connection_after_error
#	With this directive the use of persistent connections after
#	HTTP errors can be disabled. Useful if you have clients
#	who fail to handle errors on persistent connections properly.
#Default:
# persistent_connection_after_error on

#  TAG: detect_broken_pconn
#	Some servers have been found to incorrectly signal the use
#	of HTTP/1.0 persistent connections even on replies that are
#	not compatible, causing significant delays. This server problem
#	has mostly been seen on redirects.
#
#	By enabling this directive, Squid attempts to detect such
#	broken replies and automatically assumes the reply is finished
#	after a 10 second timeout.
#Default:
# detect_broken_pconn off

# CACHE DIGEST OPTIONS
# -----------------------------------------------------------------------------

#  TAG: digest_generation
#	This controls whether the server will generate a Cache Digest
#	of its contents.  By default, Cache Digest generation is
#	enabled if Squid is compiled with --enable-cache-digests defined.
#Default:
# digest_generation on

#  TAG: digest_bits_per_entry
#	This is the number of bits of the server's Cache Digest which
#	will be associated with the Digest entry for a given HTTP
#	Method and URL (public key) combination.  The default is 5.
#Default:
# digest_bits_per_entry 5

#  TAG: digest_rebuild_period	(seconds)
#	This is the wait time between Cache Digest rebuilds.
#Default:
# digest_rebuild_period 1 hour

#  TAG: digest_rewrite_period	(seconds)
#	This is the wait time between Cache Digest writes to
#	disk.
#Default:
# digest_rewrite_period 1 hour

#  TAG: digest_swapout_chunk_size	(bytes)
#	This is the number of bytes of the Cache Digest to write to
#	disk at a time.  It defaults to 4096 bytes (4KB), the Squid
#	default swap page.
#Default:
# digest_swapout_chunk_size 4096 bytes

#  TAG: digest_rebuild_chunk_percentage	(percent, 0-100)
#	This is the percentage of the Cache Digest to be scanned at a
#	time.  By default it is set to 10% of the Cache Digest.
#Default:
# digest_rebuild_chunk_percentage 10

# SNMP OPTIONS
# -----------------------------------------------------------------------------

#  TAG: snmp_port
#	The port number where Squid listens for SNMP requests. To enable
#	SNMP support set this to a suitable port number. Port number
#	3401 is often used for the Squid SNMP agent. By default it's
#	set to "0" (disabled)
#
#	Example:
#		snmp_port 3401
#Default:
# SNMP disabled.

#  TAG: snmp_access
#	Allowing or denying access to the SNMP port.
#
#	All access to the agent is denied by default.
#	usage:
#
#	snmp_access allow|deny [!]aclname ...
#
#	This clause only supports fast acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#
#Example:
# snmp_access allow snmppublic localhost
# snmp_access deny all
#Default:
# Deny, unless rules exist in squid.conf.

#  TAG: snmp_incoming_address
#	Just like 'udp_incoming_address', but for the SNMP port.
#
#	snmp_incoming_address	is used for the SNMP socket receiving
#				messages from SNMP agents.
#
#	The default snmp_incoming_address is to listen on all
#	available network interfaces.
#Default:
# Accept SNMP packets from all machine interfaces.

#  TAG: snmp_outgoing_address
#	Just like 'udp_outgoing_address', but for the SNMP port.
#
#	snmp_outgoing_address	is used for SNMP packets returned to SNMP
#				agents.
#
#	If snmp_outgoing_address is not set it will use the same socket
#	as snmp_incoming_address. Only change this if you want to have
#	SNMP replies sent using another address than where this Squid
#	listens for SNMP queries.
#
#	NOTE, snmp_incoming_address and snmp_outgoing_address can not have
#	the same value since they both use the same port.
#Default:
# Use snmp_incoming_address or an address selected by the operating system.

# ICP OPTIONS
# -----------------------------------------------------------------------------

#  TAG: icp_port
#	The port number where Squid sends and receives ICP queries to
#	and from neighbor caches.  The standard UDP port for ICP is 3130.
#
#	Example:
#		icp_port 3130
#Default:
# ICP disabled.

#  TAG: htcp_port
#	The port number where Squid sends and receives HTCP queries to
#	and from neighbor caches.  To enable HTCP, set this to the
#	standard port 4827.
#
#	Example:
#		htcp_port 4827
#Default:
# HTCP disabled.

#  TAG: log_icp_queries	on|off
#	If set, ICP queries are logged to access.log. You may wish
#	to disable this if your ICP load is VERY high to speed things
#	up or to simplify log analysis.
#Default:
# log_icp_queries on

#  TAG: udp_incoming_address
#	udp_incoming_address	is used for UDP packets received from other
#				caches.
#
#	The default behavior is to not bind to any specific address.
#
#	Only change this if you want to have all UDP queries received on
#	a specific interface/address.
#
#	NOTE: udp_incoming_address is used by the ICP, HTCP, and DNS
#	modules. Altering it will affect all of them in the same manner.
#
#	See also: udp_outgoing_address
#
#	NOTE, udp_incoming_address and udp_outgoing_address can not
#	have the same value since they both use the same port.
#Default:
# Accept packets from all machine interfaces.

#  TAG: udp_outgoing_address
#	udp_outgoing_address	is used for UDP packets sent out to other
#				caches.
#
#	The default behavior is to not bind to any specific address.
#
#	Instead it will use the same socket as udp_incoming_address.
#	Only change this if you want to have UDP queries sent using another
#	address than where this Squid listens for UDP queries from other
#	caches.
#
#	NOTE: udp_outgoing_address is used by the ICP, HTCP, and DNS
#	modules. Altering it will affect all of them in the same manner.
#
#	See also: udp_incoming_address
#
#	NOTE, udp_incoming_address and udp_outgoing_address can not
#	have the same value since they both use the same port.
#Default:
# Use udp_incoming_address or an address selected by the operating system.

#  TAG: icp_hit_stale	on|off
#	If you want to return ICP_HIT for stale cache objects, set this
#	option to 'on'.  If you have sibling relationships with caches
#	in other administrative domains, this should be 'off'.  If you only
#	have sibling relationships with caches under your control,
#	it is probably okay to set this to 'on'.
#	If set to 'on', your siblings should use the option "allow-miss"
#	on their cache_peer lines for connecting to you.
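#
#	For example (hypothetical sibling), the corresponding line on a
#	sibling's side would be:
#
#		cache_peer proxy.example.com sibling 3128 3130 allow-miss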
#Default:
# icp_hit_stale off

#  TAG: minimum_direct_hops
#	If using the ICMP pinging stuff, do direct fetches for sites
#	which are no more than this many hops away.
#Default:
# minimum_direct_hops 4

#  TAG: minimum_direct_rtt	(msec)
#	If using the ICMP pinging stuff, do direct fetches for sites
#	which are no more than this many rtt milliseconds away.
#Default:
# minimum_direct_rtt 400

#  TAG: netdb_low
#	The low water mark for the ICMP measurement database.
#
#	Note: high watermark controlled by netdb_high directive.
#
#	These watermarks are counts, not percents.  The defaults are
#	(low) 900 and (high) 1000.  When the high water mark is
#	reached, database entries will be deleted until the low
#	mark is reached.
#Default:
# netdb_low 900

#  TAG: netdb_high
#	The high water mark for the ICMP measurement database.
#
#	Note: low watermark controlled by netdb_low directive.
#
#	These watermarks are counts, not percents.  The defaults are
#	(low) 900 and (high) 1000.  When the high water mark is
#	reached, database entries will be deleted until the low
#	mark is reached.
#Default:
# netdb_high 1000

#  TAG: netdb_ping_period
#	The minimum period for measuring a site.  There will be at
#	least this much delay between successive pings to the same
#	network.  The default is five minutes.
#Default:
# netdb_ping_period 5 minutes

#  TAG: query_icmp	on|off
#	If you want to ask your peers to include ICMP data in their ICP
#	replies, enable this option.
#
#	If your peer has configured Squid (during compilation) with
#	'--enable-icmp' that peer will send ICMP pings to origin server
#	sites of the URLs it receives.  If you enable this option the
#	ICP replies from that peer will include the ICMP data (if available).
#	Then, when choosing a parent cache, Squid will choose the parent with
#	the minimal RTT to the origin server.  When this happens, the
#	hierarchy field of the access.log will be
#	"CLOSEST_PARENT_MISS".  This option is off by default.
#Default:
# query_icmp off

#  TAG: test_reachability	on|off
#	When this is 'on', ICP MISS replies will be ICP_MISS_NOFETCH
#	instead of ICP_MISS if the target host is NOT in the ICMP
#	database, or has a zero RTT.
#Default:
# test_reachability off

#  TAG: icp_query_timeout	(msec)
#	Normally Squid will automatically determine an optimal ICP
#	query timeout value based on the round-trip-time of recent ICP
#	queries.  If you want to override the value determined by
#	Squid, set this 'icp_query_timeout' to a non-zero value.  This
#	value is specified in MILLISECONDS, so, to use a 2-second
#	timeout (the old default), you would write:
#
#		icp_query_timeout 2000
#Default:
# Dynamic detection.

#  TAG: maximum_icp_query_timeout	(msec)
#	Normally the ICP query timeout is determined dynamically.  But
#	sometimes it can lead to very large values (say 5 seconds).
#	Use this option to put an upper limit on the dynamic timeout
#	value.  Do NOT use this option to always use a fixed (instead
#	of a dynamic) timeout value. To set a fixed timeout see the
#	'icp_query_timeout' directive.
#Default:
# maximum_icp_query_timeout 2000

#  TAG: minimum_icp_query_timeout	(msec)
#	Normally the ICP query timeout is determined dynamically.  But
#	sometimes it can lead to very small timeouts, even lower than
#	the normal latency variance on your link due to traffic.
#	Use this option to put a lower limit on the dynamic timeout
#	value.  Do NOT use this option to always use a fixed (instead
#	of a dynamic) timeout value. To set a fixed timeout see the
#	'icp_query_timeout' directive.
#Default:
# minimum_icp_query_timeout 5

#  TAG: background_ping_rate	time-units
#	Controls how often the ICP pings are sent to siblings that
#	have background-ping set.
#Default:
# background_ping_rate 10 seconds

# MULTICAST ICP OPTIONS
# -----------------------------------------------------------------------------

#  TAG: mcast_groups
#	This tag specifies a list of multicast groups which your server
#	should join to receive multicasted ICP queries.
#
#	NOTE!  Be very careful what you put here!  Be sure you
#	understand the difference between an ICP _query_ and an ICP
#	_reply_.  This option is to be set only if you want to RECEIVE
#	multicast queries.  Do NOT set this option to SEND multicast
#	ICP (use cache_peer for that).  ICP replies are always sent via
#	unicast, so this option does not affect whether or not you will
#	receive replies from multicast group members.
#
#	You must be very careful to NOT use a multicast address which
#	is already in use by another group of caches.
#
#	If you are unsure about multicast, please read the Multicast
#	chapter in the Squid FAQ (http://www.squid-cache.org/FAQ/).
#
#	Usage: mcast_groups 239.128.16.128 224.0.1.20
#
#	By default, Squid doesn't listen on any multicast groups.
#Default:
# none

#  TAG: mcast_miss_addr
# Note: This option is only available if Squid is rebuilt with the
#       -DMULTICAST_MISS_STREAM define
#
#	If you enable this option, every "cache miss" URL will
#	be sent out on the specified multicast address.
#
#	Do not enable this option unless you are absolutely
#	certain you understand what you are doing.
#Default:
# disabled.

#  TAG: mcast_miss_ttl
# Note: This option is only available if Squid is rebuilt with the
#       -DMULTICAST_MISS_STREAM define
#
#	This is the time-to-live value for packets multicasted
#	when multicasting of cache miss URLs is enabled.  By
#	default this is set to 'site scope', i.e. 16.
#Default:
# mcast_miss_ttl 16

#  TAG: mcast_miss_port
# Note: This option is only available if Squid is rebuilt with the
#       -DMULTICAST_MISS_STREAM define
#
#	This is the port number to be used in conjunction with
#	'mcast_miss_addr'.
#Default:
# mcast_miss_port 3135

#  TAG: mcast_miss_encode_key
# Note: This option is only available if Squid is rebuilt with the
#       -DMULTICAST_MISS_STREAM define
#
#	The URLs that are sent in the multicast miss stream are
#	encrypted.  This is the encryption key.
#Default:
# mcast_miss_encode_key XXXXXXXXXXXXXXXX

#  TAG: mcast_icp_query_timeout	(msec)
#	For multicast peers, Squid regularly sends out ICP "probes" to
#	count how many other peers are listening on the given multicast
#	address.  This value specifies how long Squid should wait to
#	count all the replies.  The default is 2000 msec, or 2
#	seconds.
#Default:
# mcast_icp_query_timeout 2000

# INTERNAL ICON OPTIONS
# -----------------------------------------------------------------------------

#  TAG: icon_directory
#	Where the icons are stored. These are normally kept in
#	/usr/share/squid/icons
#Default:
# icon_directory /usr/share/squid/icons

#  TAG: global_internal_static
#	This directive controls if Squid should intercept all requests for
#	/squid-internal-static/ no matter which host the URL is requesting
#	(default on setting), or if nothing special should be done for
#	such URLs (off setting). The purpose of this directive is to make
#	icons etc. work better in complex cache hierarchies where it may
#	not always be possible for all corners in the cache mesh to reach
#	the server generating a directory listing.
#Default:
# global_internal_static on

#  TAG: short_icon_urls
#	If this is enabled Squid will use short URLs for icons.
#	If disabled it will revert to the old behavior of including
#	its own name and port in the URL.
#
#	If you run a complex cache hierarchy with a mix of Squid and
#	other proxies you may need to disable this directive.
#Default:
# short_icon_urls on

# ERROR PAGE OPTIONS
# -----------------------------------------------------------------------------

#  TAG: error_directory
#	If you wish to create your own versions of the default
#	error files to customize them to suit your company, copy
#	the error/template files to another directory and point
#	this tag at them.
#
#	WARNING: This option will disable multi-language support
#	         on error pages if used.
#
#	The squid developers are interested in making squid available in
#	a wide variety of languages. If you are making translations for a
#	language that Squid does not currently provide please consider
#	contributing your translation back to the project.
#	http://wiki.squid-cache.org/Translations
#
#	The squid developers working on translations are happy to supply drop-in
#	translated error files in exchange for any new language contributions.
#Default:
# Send error pages in the client's preferred language

#  TAG: error_default_language
#	Set the default language which squid will send error pages in
#	if no existing translation matches the client's language
#	preferences.
#
#	If unset (default) generic English will be used.
#
#	The squid developers are interested in making squid available in
#	a wide variety of languages. If you are interested in making
#	translations for any language see the squid wiki for details.
#	http://wiki.squid-cache.org/Translations
#Default:
# Generate English language pages.

#  TAG: error_log_languages
#	Log to cache.log what languages users are attempting to
#	auto-negotiate for translations.
#
#	Successful negotiations are not logged. Only failures
#	have meaning to indicate that Squid may need an upgrade
#	of its error page translations.
#Default:
# error_log_languages on

#  TAG: err_page_stylesheet
#	CSS stylesheet used to style the display of Squid default error pages.
#
#	For information on CSS see http://www.w3.org/Style/CSS/
#Default:
# err_page_stylesheet /etc/squid/errorpage.css

#  TAG: err_html_text
#	HTML text to include in error messages.  Make this a "mailto"
#	URL to your admin address, or maybe just a link to your
#	organization's Web page.
#
#	To include this in your error messages, you must rewrite
#	the error template files (found in the "errors" directory).
#	Wherever you want the 'err_html_text' line to appear,
#	insert a %L tag in the error template file.
#Default:
# none

#  TAG: email_err_data	on|off
#	If enabled, information about the error that occurred will be
#	included in the mailto links of the ERR pages (if %W is set)
#	so that the email body contains the data.
#	Syntax is <A HREF="mailto:%w%W">%w</A>
#Default:
# email_err_data on

#  TAG: deny_info
#	Usage:   deny_info err_page_name acl
#	or       deny_info http://... acl
#	or       deny_info TCP_RESET acl
#
#	This can be used to return an ERR_ page for requests which
#	do not pass the 'http_access' rules.  Squid remembers the last
#	acl it evaluated in http_access, and if a 'deny_info' line exists
#	for that ACL Squid returns a corresponding error page.
#
#	The acl is typically the last acl on the http_access deny line which
#	denied access. The exceptions to this rule are:
#	- When Squid needs to request authentication credentials. It's then
#	  the first authentication related acl encountered
#	- When none of the http_access lines matches. It's then the last
#	  acl processed on the last http_access line.
#	- When the decision to deny access was made by an adaptation service,
#	  the acl name is the corresponding eCAP or ICAP service_name.
#
#	NP: If providing your own custom error pages with error_directory
#	    you may also specify them by your custom file name:
#	    Example: deny_info ERR_CUSTOM_ACCESS_DENIED bad_guys
#
#	By default Squid will send "403 Forbidden". A different 4xx or 5xx
#	may be specified by prefixing the file name with the code and a colon.
#	e.g. 404:ERR_CUSTOM_ACCESS_DENIED
#
#	Alternatively you can tell Squid to reset the TCP connection
#	by specifying TCP_RESET.
#
#	Or you can specify an error URL or URL pattern. The browser will
#	be redirected to the specified URL after formatting tags have
#	been replaced. The redirect is done with 302 or 307 according to
#	the HTTP/1.1 specs. A different 3xx code may be specified by prefixing
#	the URL. e.g. 303:http://example.com/
#
#	URL FORMAT TAGS:
#		%a	- username (if available. Password NOT included)
#		%B	- FTP path URL
#		%e	- Error number
#		%E	- Error description
#		%h	- Squid hostname
#		%H	- Request domain name
#		%i	- Client IP Address
#		%M	- Request Method
#		%o	- Message result from external ACL helper
#		%p	- Request Port number
#		%P	- Request Protocol name
#		%R	- Request URL path
#		%T	- Timestamp in RFC 1123 format
#		%U	- Full canonical URL from client
#			  (HTTPS URLs terminate with *)
#		%u	- Full canonical URL from client
#		%w	- Admin email from squid.conf
#		%x	- Error name
#		%%	- Literal percent (%) code
#
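#Example:
## a sketch: serve a custom error page with HTTP status 403 for a
## hypothetical "bad_guys" ACL, and reset connections matching a
## hypothetical "probes" ACL
#deny_info 403:ERR_CUSTOM_ACCESS_DENIED bad_guys
#deny_info TCP_RESET probes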
#Default:
# none

# OPTIONS INFLUENCING REQUEST FORWARDING 
# -----------------------------------------------------------------------------

#  TAG: nonhierarchical_direct
#	By default, Squid will send any non-hierarchical requests
#	(not cacheable request type) direct to origin servers.
#
#	When this is set to "off", Squid will prefer to send these
#	requests to parents.
#
#	Note that in most configurations, by turning this off you will only
#	add latency to these requests without any improvement in global hit
#	ratio.
#
#	This option only sets a preference. If the parent is unavailable a
#	direct connection to the origin server may still be attempted. To
#	completely prevent direct connections use never_direct.
#Default:
# nonhierarchical_direct on

#  TAG: prefer_direct
#	Normally Squid tries to use parents for most requests. If, for some
#	reason, you want it to first try going direct and only use a parent
#	if going direct fails, set this to on.
#
#	By combining nonhierarchical_direct off and prefer_direct on you
#	can set up Squid to use a parent as a backup path if going direct
#	fails.
#
#	Note: If you want Squid to use parents for all requests see
#	the never_direct directive. prefer_direct only modifies how Squid
#	acts on cacheable requests.
#Default:
# prefer_direct off

#  TAG: cache_miss_revalidate	on|off
#	RFC 7232 defines a conditional request mechanism to prevent
#	response objects being unnecessarily transferred over the network.
#	If that mechanism is used by the client and a cache MISS occurs
#	it can prevent new cache entries being created.
#
#	This option determines whether Squid, on a cache MISS, will pass
#	the client revalidation request to the server or try to fetch new
#	content for caching. It can be useful, while the cache is mostly
#	empty, to populate the cache more quickly by generating
#	non-conditional GETs.
#
#	When set to 'on' (default), Squid will pass all client If-* headers
#	to the server. This permits server responses without a cacheable
#	payload to be delivered and on MISS no new cache entry is created.
#
#	When set to 'off' and if the request is cacheable, Squid will
#	remove the clients If-Modified-Since and If-None-Match headers from
#	the request sent to the server. This requests a 200 status response
#	from the server to create a new cache entry with.
#Default:
# cache_miss_revalidate on

#  TAG: always_direct
#	Usage: always_direct allow|deny [!]aclname ...
#
#	Here you can use ACL elements to specify requests which should
#	ALWAYS be forwarded by Squid to the origin servers without using
#	any peers.  For example, to always directly forward requests for
#	local servers, ignoring any parents or siblings you may have, use
#	something like:
#
#		acl local-servers dstdomain my.domain.net
#		always_direct allow local-servers
#
#	To always forward FTP requests directly, use
#
#		acl FTP proto FTP
#		always_direct allow FTP
#
#	NOTE: There is a similar, but opposite option named
#	'never_direct'.  You need to be aware that "always_direct deny
#	foo" is NOT the same thing as "never_direct allow foo".  You
#	may need to use a deny rule to exclude a more-specific case of
#	some other rule.  Example:
#
#		acl local-external dstdomain external.foo.net
#		acl local-servers dstdomain  .foo.net
#		always_direct deny local-external
#		always_direct allow local-servers
#
#	NOTE: If your goal is to make the client forward the request
#	directly to the origin server bypassing Squid then this needs
#	to be done in the client configuration. Squid configuration
#	can only tell Squid how Squid should fetch the object.
#
#	NOTE: This directive is not related to caching. The replies
#	are cached as usual even if you use always_direct. To prevent
#	caching of the replies, see the 'cache' directive.
#
#	This clause supports both fast and slow acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# Prevent any cache_peer being used for this request.

#  TAG: never_direct
#	Usage: never_direct allow|deny [!]aclname ...
#
#	never_direct is the opposite of always_direct.  Please read
#	the description for always_direct if you have not already.
#
#	With 'never_direct' you can use ACL elements to specify
#	requests which should NEVER be forwarded directly to origin
#	servers.  For example, to force the use of a proxy for all
#	requests, except those in your local domain use something like:
#
#		acl local-servers dstdomain .foo.net
#		never_direct deny local-servers
#		never_direct allow all
#
#	or if Squid is inside a firewall and there are local intranet
#	servers inside the firewall use something like:
#
#		acl local-intranet dstdomain .foo.net
#		acl local-external dstdomain external.foo.net
#		always_direct deny local-external
#		always_direct allow local-intranet
#		never_direct allow all
#
#	This clause supports both fast and slow acl types.
#	See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
#Default:
# Allow DNS results to be used for this request.

# ADVANCED NETWORKING OPTIONS
# -----------------------------------------------------------------------------

#  TAG: incoming_udp_average
#	Heavy voodoo here.  I can't even believe you are reading this.
#	Are you crazy?  Don't even think about adjusting these unless
#	you understand the algorithms in comm_select.c first!
#Default:
# incoming_udp_average 6

#  TAG: incoming_tcp_average
#	Heavy voodoo here.  I can't even believe you are reading this.
#	Are you crazy?  Don't even think about adjusting these unless
#	you understand the algorithms in comm_select.c first!
#Default:
# incoming_tcp_average 4

#  TAG: incoming_dns_average
#	Heavy voodoo here.  I can't even believe you are reading this.
#	Are you crazy?  Don't even think about adjusting these unless
#	you understand the algorithms in comm_select.c first!
#Default:
# incoming_dns_average 4

#  TAG: min_udp_poll_cnt
#	Heavy voodoo here.  I can't even believe you are reading this.
#	Are you crazy?  Don't even think about adjusting these unless
#	you understand the algorithms in comm_select.c first!
#Default:
# min_udp_poll_cnt 8

#  TAG: min_dns_poll_cnt
#	Heavy voodoo here.  I can't even believe you are reading this.
#	Are you crazy?  Don't even think about adjusting these unless
#	you understand the algorithms in comm_select.c first!
#Default:
# min_dns_poll_cnt 8

#  TAG: min_tcp_poll_cnt
#	Heavy voodoo here.  I can't even believe you are reading this.
#	Are you crazy?  Don't even think about adjusting these unless
#	you understand the algorithms in comm_select.c first!
#Default:
# min_tcp_poll_cnt 8

#  TAG: accept_filter
#	FreeBSD:
#
#	The name of an accept(2) filter to install on Squid's
#	listen socket(s).  This feature is perhaps specific to
#	FreeBSD and requires support in the kernel.
#
#	The 'httpready' filter delays delivering new connections
#	to Squid until a full HTTP request has been received.
#	See the accf_http(9) man page for details.
#
#	The 'dataready' filter delays delivering new connections
#	to Squid until there is some data to process.
#	See the accf_dataready(9) man page for details.
#
#	Linux:
#	
#	The 'data' filter delays delivery of new connections
#	to Squid until there is some data to process, via TCP_DEFER_ACCEPT.
#	You may optionally specify a number of seconds to wait by
#	'data=N' where N is the number of seconds. Defaults to 30
#	if not specified.  See the tcp(7) man page for details.
#EXAMPLE:
## FreeBSD
#accept_filter httpready
## Linux
#accept_filter data
#Default:
# none

#  TAG: client_ip_max_connections
#	Set an absolute limit on the number of connections a single
#	client IP can use. Any more than this and Squid will begin to drop
#	new connections from the client until it closes some links.
#
#	Note that this is a global limit. It affects all HTTP, HTCP, Gopher and FTP
#	connections from the client. For finer control use the ACL access controls.
#
#	Requires client_db to be enabled (the default).
#
#	WARNING: This may noticeably slow down traffic received via external proxies
#	or NAT devices and cause them to rebound error messages back to their clients.
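#Example:
## a sketch: cap each client IP at 50 concurrent connections
## (50 is an arbitrary illustrative value)
#client_ip_max_connections 50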
#Default:
# No limit.

#  TAG: tcp_recv_bufsize	(bytes)
#	Size of receive buffer to set for TCP sockets.  Probably just
#	as easy to change your kernel's default.
#	Omit from squid.conf to use the default buffer size.
#Default:
# Use operating system TCP defaults.

# ICAP OPTIONS
# -----------------------------------------------------------------------------

#  TAG: icap_enable	on|off
#	If you want to enable the ICAP module support, set this to on.
#Default:
# icap_enable off

#  TAG: icap_connect_timeout
#	This parameter specifies how long to wait for the TCP connect to
#	the requested ICAP server to complete before giving up and either
#	terminating the HTTP transaction or bypassing the failure.
#
#	The default for optional services is peer_connect_timeout.
#	The default for essential services is connect_timeout.
#	If this option is explicitly set, its value applies to all services.
#Default:
# none

#  TAG: icap_io_timeout	time-units
#	This parameter specifies how long to wait for an I/O activity on
#	an established, active ICAP connection before giving up and
#	either terminating the HTTP transaction or bypassing the
#	failure.
#Default:
# Use read_timeout.

#  TAG: icap_service_failure_limit	limit [in memory-depth time-units]
#	The limit specifies the number of failures that Squid tolerates
#	when establishing a new TCP connection with an ICAP service. If
#	the number of failures exceeds the limit, the ICAP service is
#	not used for new ICAP requests until it is time to refresh its
#	OPTIONS.
#
#	A negative value disables the limit. Without the limit, an ICAP
#	service will not be considered down due to connectivity failures
#	between ICAP OPTIONS requests.
#
#	Squid forgets ICAP service failures older than the specified
#	value of memory-depth. The memory fading algorithm 
#	is approximate because Squid does not remember individual 
#	errors but groups them instead, splitting the option
#	value into ten time slots of equal length.
#
#	When memory-depth is 0 (the default), this option has no
#	effect on service failure expiration.
#
#	Squid always forgets failures when updating service settings
#	using an ICAP OPTIONS transaction, regardless of this option
#	setting.
#
#	For example,
#		# suspend service usage after 10 failures in 5 seconds:
#		icap_service_failure_limit 10 in 5 seconds
#Default:
# icap_service_failure_limit 10

#  TAG: icap_service_revival_delay
#	The delay specifies the number of seconds to wait after an ICAP
#	OPTIONS request failure before requesting the options again. The
#	failed ICAP service is considered "down" until fresh OPTIONS are
#	fetched.
#
#	The actual delay cannot be smaller than the hardcoded minimum
#	delay of 30 seconds.
#Default:
# icap_service_revival_delay 180

#  TAG: icap_preview_enable	on|off
#	The ICAP Preview feature allows the ICAP server to handle the
#	HTTP message by looking only at the beginning of the message body
#	or even without receiving the body at all. In some environments,
#	previews greatly speed up ICAP processing.
#
#	During an ICAP OPTIONS transaction, the server may tell Squid what
#	HTTP messages should be previewed and how big the preview should be.
#	Squid will not use Preview if the server did not request one.
#
#	To disable ICAP Preview for all ICAP services, regardless of
#	individual ICAP server OPTIONS responses, set this option to "off".
#Example:
#icap_preview_enable off
#Default:
# icap_preview_enable on

#  TAG: icap_preview_size
#	The default size of preview data to be sent to the ICAP server.
#	This value may be overridden on a per-server basis by OPTIONS responses.
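#Example:
## a sketch: offer the ICAP server a 1 KB preview (1024 is illustrative)
#icap_preview_size 1024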
#Default:
# No preview sent.

#  TAG: icap_206_enable	on|off
#	206 (Partial Content) responses are an ICAP extension that allows the
#	ICAP agents to optionally combine adapted and original HTTP message
#	content. The decision to combine is postponed until the end of the
#	ICAP response. Squid supports Partial Content extension by default.
#
#	Activation of the Partial Content extension is negotiated with each
#	ICAP service during OPTIONS exchange. Most ICAP servers should handle
#	negotiation correctly even if they do not support the extension, but
#	some might fail. To disable Partial Content support for all ICAP
#	services and to avoid any negotiation, set this option to "off".
#
#	Example:
#	    icap_206_enable off
#Default:
# icap_206_enable on

#  TAG: icap_default_options_ttl
#	The default TTL value for ICAP OPTIONS responses that don't have
#	an Options-TTL header.
#Default:
# icap_default_options_ttl 60

#  TAG: icap_persistent_connections	on|off
#	Whether or not Squid should use persistent connections to
#	an ICAP server.
#Default:
# icap_persistent_connections on

#  TAG: adaptation_send_client_ip	on|off
#	If enabled, Squid shares HTTP client IP information with adaptation
#	services. For ICAP, Squid adds the X-Client-IP header to ICAP requests.
#	For eCAP, Squid sets the libecap::metaClientIp transaction option.
#
#	See also: adaptation_uses_indirect_client
#Default:
# adaptation_send_client_ip off

#  TAG: adaptation_send_username	on|off
#	This sends authenticated HTTP client username (if available) to
#	the adaptation service.
#
#	For ICAP, the username value is encoded based on the
#	icap_client_username_encode option and is sent using the header
#	specified by the icap_client_username_header option.
#Default:
# adaptation_send_username off

#  TAG: icap_client_username_header
#	ICAP request header name to use for adaptation_send_username.
#Default:
# icap_client_username_header X-Client-Username

#  TAG: icap_client_username_encode	on|off
#	Whether to base64 encode the authenticated client username.
#Default:
# icap_client_username_encode off

#  TAG: icap_service
#	Defines a single ICAP service using the following format:
#
#	icap_service id vectoring_point uri [option ...]
#
#	id: ID
#		an opaque identifier or name which is used to direct traffic to
#		this specific service. Must be unique among all adaptation
#		services in squid.conf.
#
#	vectoring_point: reqmod_precache|reqmod_postcache|respmod_precache|respmod_postcache
#		This specifies at which point of transaction processing the
#		ICAP service should be activated. *_postcache vectoring points
#		are not yet supported.
#
#	uri: icap://servername:port/servicepath
#		ICAP server and service location.
#
#	ICAP does not allow a single service to handle both REQMOD and RESPMOD
#	transactions. Squid does not enforce that requirement. You can specify
#	services with the same service_url and different vectoring_points. You
#	can even specify multiple identical services as long as their
#	service_names differ.
#
#	To activate a service, use the adaptation_access directive. To group
#	services, use adaptation_service_chain and adaptation_service_set.
#
#	Service options are separated by white space. ICAP services support
#	the following name=value options:
#
#	bypass=on|off|1|0
#		If set to 'on' or '1', the ICAP service is treated as
#		optional. If the service cannot be reached or malfunctions,
#		Squid will try to ignore any errors and process the message as
#		if the service was not enabled. Not all ICAP errors can be
#		bypassed.  If set to 0, the ICAP service is treated as
#		essential and all ICAP errors will result in an error page
#		returned to the HTTP client.
#
#		Bypass is off by default: services are treated as essential.
#
#	routing=on|off|1|0
#		If set to 'on' or '1', the ICAP service is allowed to
#		dynamically change the current message adaptation plan by
#		returning a chain of services to be used next. The services
#		are specified using the X-Next-Services ICAP response header
#		value, formatted as a comma-separated list of service names.
#		Each named service should be configured in squid.conf. Other
#		services are ignored. An empty X-Next-Services value results
#		in an empty plan which ends the current adaptation.
#
#		A dynamic adaptation plan may cross or cover multiple supported
#		vectoring points in their natural processing order.
#
#		Routing is not allowed by default: the ICAP X-Next-Services
#		response header is ignored.
#
#	ipv6=on|off
#		Only has effect on split-stack systems. The default on those systems
#		is to use IPv4-only connections. When set to 'on' this option will
#		make Squid use IPv6-only connections to contact this ICAP service.
#
#	on-overload=block|bypass|wait|force
#		If the service Max-Connections limit has been reached, do
#		one of the following for each new ICAP transaction:
#		  * block:  send an HTTP error response to the client
#		  * bypass: ignore the "over-connected" ICAP service
#		  * wait:   wait (in a FIFO queue) for an ICAP connection slot
#		  * force:  proceed, ignoring the Max-Connections limit 
#
#		In SMP mode with N workers, each worker assumes the service
#		connection limit is Max-Connections/N, even though not all
#		workers may use a given service.
#
#		The default value is "bypass" if the service is bypassable,
#		otherwise it is set to "wait".
#
#	max-conn=number
#		Use the given number as the Max-Connections limit, regardless
#		of the Max-Connections value given by the service, if any.
#
#	Older icap_service format without optional named parameters is
#	deprecated but supported for backward compatibility.
#
#Example:
#icap_service svcBlocker reqmod_precache icap://icap1.mydomain.net:1344/reqmod bypass=0
#icap_service svcLogger reqmod_precache icap://icap2.mydomain.net:1344/respmod routing=on
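## a fuller sketch, wiring the services above into adaptation processing;
## adaptation_access is documented later in this file ("all" is the
## built-in ACL)
#icap_enable on
#adaptation_access svcBlocker allow all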
#Default:
# none

#  TAG: icap_class
#	This deprecated option was documented to define an ICAP service
#	chain, even though it actually defined a set of similar, redundant
#	services, and the chains were not supported. 
#
#	To define a set of redundant services, please use the
#	adaptation_service_set directive. For service chains, use
#	adaptation_service_chain.
#Default:
# none

#  TAG: icap_access
#	This option is deprecated. Please use adaptation_access, which
#	has the same ICAP functionality, but comes with better
#	documentation, and eCAP support.
#Default:
# none

# eCAP OPTIONS
# -----------------------------------------------------------------------------

#  TAG: ecap_enable	on|off
#	Controls whether eCAP support is enabled.
#Default:
# ecap_enable off

#  TAG: ecap_service
#	Defines a single eCAP service
#
#	ecap_service id vectoring_point uri [option ...]
#
#	id: ID
#		an opaque identifier or name which is used to direct traffic to
#		this specific service. Must be unique among all adaptation
#		services in squid.conf.
#
#	vectoring_point: reqmod_precache|reqmod_postcache|respmod_precache|respmod_postcache
#		This specifies at which point of transaction processing the
#		eCAP service should be activated. *_postcache vectoring points
#		are not yet supported.
#
#	uri: ecap://vendor/service_name?custom&cgi=style&parameters=optional
#		Squid uses the eCAP service URI to match this configuration
#		line with one of the dynamically loaded services. Each loaded
#		eCAP service must have a unique URI. Obtain the right URI from
#		the service provider.
#
#	To activate a service, use the adaptation_access directive. To group
#	services, use adaptation_service_chain and adaptation_service_set.
#
#	Service options are separated by white space. eCAP services support
#	the following name=value options:
#
#	bypass=on|off|1|0
#		If set to 'on' or '1', the eCAP service is treated as optional.
#		If the service cannot be reached or malfunctions, Squid will try
#		to ignore any errors and process the message as if the service
#		was not enabled. Not all eCAP errors can be bypassed.
#		If set to 'off' or '0', the eCAP service is treated as essential
#		and all eCAP errors will result in an error page returned to the
#		HTTP client.
#
#		Bypass is off by default: services are treated as essential.
#
#	routing=on|off|1|0
#		If set to 'on' or '1', the eCAP service is allowed to
#		dynamically change the current message adaptation plan by
#		returning a chain of services to be used next.
#
#		A dynamic adaptation plan may cross or cover multiple supported
#		vectoring points in their natural processing order.
#
#		Routing is not allowed by default.
#
#	Older ecap_service format without optional named parameters is
#	deprecated but supported for backward compatibility.
#
#
#Example:
#ecap_service s1 reqmod_precache ecap://filters.R.us/leakDetector?on_error=block bypass=off
#ecap_service s2 respmod_precache ecap://filters.R.us/virusFilter config=/etc/vf.cfg bypass=on
#Default:
# none

#  TAG: loadable_modules
#	Instructs Squid to load the specified dynamic module(s) or activate
#	preloaded module(s).
#Example:
#loadable_modules /usr/lib/MinimalAdapter.so
#Default:
# none

# MESSAGE ADAPTATION OPTIONS
# -----------------------------------------------------------------------------

#  TAG: adaptation_service_set
#
#	Configures an ordered set of similar, redundant services. This is
#	useful when hot standby or backup adaptation servers are available.
#
#	    adaptation_service_set set_name service_name1 service_name2 ...
#
# 	The named services are used in the set declaration order. The first
#	applicable adaptation service from the set is used first. The next
#	applicable service is tried if and only if the transaction with the
#	previous service fails and the message waiting to be adapted is still
#	intact.
#
#	When adaptation starts, broken services are ignored as if they were
#	not a part of the set. A broken service is a down optional service.
#
#	The services in a set must be attached to the same vectoring point
#	(e.g., pre-cache) and use the same adaptation method (e.g., REQMOD).
#
#	If all services in a set are optional then adaptation failures are
#	bypassable. If all services in the set are essential, then a
#	transaction failure with one service may still be retried using
#	another service from the set, but when all services fail, the master
#	transaction fails as well.
#
#	A set may contain a mix of optional and essential services, but that
#	is likely to lead to surprising results because broken services become
#	ignored (see above), making previously bypassable failures fatal.
#	Technically, it is the bypassability of the last failed service that
#	matters.
#
#	See also: adaptation_access adaptation_service_chain
#
#Example:
#adaptation_service_set svcBlocker urlFilterPrimary urlFilterBackup
#adaptation_service_set svcLogger loggerLocal loggerRemote
#Default:
# none

#  TAG: adaptation_service_chain
#
#	Configures a list of complementary services that will be applied
#	one-by-one, forming an adaptation chain or pipeline. This is useful
#	when Squid must perform different adaptations on the same message.
#
#	    adaptation_service_chain chain_name service_name1 svc_name2 ...
#
# 	The named services are used in the chain declaration order. The first
#	applicable adaptation service from the chain is used first. The next
#	applicable service is applied to the successful adaptation results of
#	the previous service in the chain.
#
#	When adaptation starts, broken services are ignored as if they were
#	not a part of the chain. A broken service is a down optional service.
#
#	Request satisfaction terminates the adaptation chain because Squid
#	does not currently allow declaration of RESPMOD services at the
#	"reqmod_precache" vectoring point (see icap_service or ecap_service).
#
#	The services in a chain must be attached to the same vectoring point
#	(e.g., pre-cache) and use the same adaptation method (e.g., REQMOD).
#
#	A chain may contain a mix of optional and essential services. If an
#	essential adaptation fails (or the failure cannot be bypassed for
#	other reasons), the master transaction fails. Otherwise, the failure
#	is bypassed as if the failed adaptation service was not in the chain.
#
#	See also: adaptation_access adaptation_service_set
#
#Example:
#adaptation_service_chain svcRequest requestLogger urlFilter leakDetector
#Default:
# none

#  TAG: adaptation_access
#	Sends an HTTP transaction to an ICAP or eCAP adaptation service.
#
#	adaptation_access service_name allow|deny [!]aclname...
#	adaptation_access set_name     allow|deny [!]aclname...
#
#	At each supported vectoring point, the adaptation_access
#	statements are processed in the order they appear in this
#	configuration file. Statements pointing to the following services
#	are ignored (i.e., skipped without checking their ACL):
#
#	    - services serving different vectoring points
#	    - "broken-but-bypassable" services
#	    - "up" services configured to ignore such transactions
#	      (e.g., based on the ICAP Transfer-Ignore header).
#
#	When a set_name is used, all services in the set are checked
#	using the same rules, to find the first applicable one. See
#	adaptation_service_set for details.
#
#	If an access list is checked and there is a match, the
#	processing stops: For an "allow" rule, the corresponding
#	adaptation service is used for the transaction. For a "deny"
#	rule, no adaptation service is activated.
#
#	It is currently not possible to apply more than one adaptation
#	service at the same vectoring point to the same HTTP transaction.
#
#	See also: icap_service and ecap_service
#
#Example:
#adaptation_access service_1 allow all
#Default:
# Allow, unless rules exist in squid.conf.

#  TAG: adaptation_service_iteration_limit
#	Limits the number of iterations allowed when applying adaptation
#	services to a message. If your longest adaptation set or chain
#	may have more than 16 services, increase the limit beyond its
#	default value of 16. If detecting infinite iteration loops sooner
#	is critical, make the iteration limit match the actual number
#	of services in your longest adaptation set or chain.
#
#	Infinite adaptation loops are most likely with routing services.
#
#	See also: icap_service routing=1
#Default:
# adaptation_service_iteration_limit 16

#  TAG: adaptation_masterx_shared_names
#	For each master transaction (i.e., the HTTP request and response
#	sequence, including all related ICAP and eCAP exchanges), Squid
#	maintains a table of metadata. The table entries are (name, value)
#	pairs shared among eCAP and ICAP exchanges. The table is destroyed
#	with the master transaction.
#
#	This option specifies the table entry names that Squid must accept
#	from and forward to the adaptation transactions.
#
#	An ICAP REQMOD or RESPMOD transaction may set an entry in the 
#	shared table by returning an ICAP header field with a name 
#	specified in adaptation_masterx_shared_names.
#
#	An eCAP REQMOD or RESPMOD transaction may set an entry in the
#	shared table by implementing the libecap::visitEachOption() API
#	to provide an option with a name specified in
#	adaptation_masterx_shared_names.
#
#	Squid will store and forward the set entry to subsequent adaptation
#	transactions within the same master transaction scope.
#
#	Only one shared entry name is supported at this time.
#
#Example:
## share authentication information among ICAP services
#adaptation_masterx_shared_names X-Subscriber-ID
#Default:
# none

#  TAG: adaptation_meta
#	This option allows the Squid administrator to add custom ICAP request
#	headers or eCAP options to Squid ICAP requests or eCAP transactions.
#	Use it to pass custom authentication tokens and other
#	transaction-state related meta information to an ICAP/eCAP service.
#	
#	The addition of a meta header is ACL-driven:
#		adaptation_meta name value [!]aclname ...
#	
#	Processing for a given header name stops after the first ACL list match.
#	Thus, it is impossible to add two headers with the same name. If no ACL
#	lists match for a given header name, no such header is added. For 
#	example:
#	
#		# do not debug transactions except for those that need debugging
#		adaptation_meta X-Debug 1 needs_debugging
#	
#		# log all transactions except for those that must remain secret
#		adaptation_meta X-Log 1 !keep_secret
#	
#		# mark transactions from users in the "G 1" group
#		adaptation_meta X-Authenticated-Groups "G 1" authed_as_G1
#	
#	The "value" parameter may be a regular squid.conf token or a "double
#	quoted string". Within the quoted string, use backslash (\) to escape
#	any character, which is currently only useful for escaping backslashes
#	and double quotes. For example,
#	    "this string has one backslash (\\) and two \"quotes\""
#
#	Used adaptation_meta header values may be logged via %note
#	logformat code. If multiple adaptation_meta headers with the same name
#	are used during master transaction lifetime, the header values are
#	logged in the order they were used and duplicate values are ignored
#	(only the first repeated value will be logged).
#Default:
# none

#  TAG: icap_retry
#	This ACL determines which retriable ICAP transactions are
#	retried. Transactions that received a complete ICAP response
#	and did not have to consume or produce HTTP bodies to receive
#	that response are usually retriable.
#
#	icap_retry allow|deny [!]aclname ...
#
#	Squid automatically retries some ICAP I/O timeouts and errors
#	due to persistent connection race conditions.
#
#	See also: icap_retry_limit
#Default:
# icap_retry deny all

#  TAG: icap_retry_limit
#	Limits the number of retries allowed.
#
#	Communication errors due to persistent connection race
#	conditions are unavoidable, automatically retried, and do not
#	count against this limit.
#
#	See also: icap_retry
#Default:
# No retries are allowed.

# DNS OPTIONS
# -----------------------------------------------------------------------------

#  TAG: check_hostnames
#	For security and stability reasons Squid can check
#	hostnames for Internet standard RFC compliance. If you want
#	Squid to perform these checks turn this directive on.
#Default:
# check_hostnames off

#  TAG: allow_underscore
#	Underscore characters are not strictly allowed in Internet hostnames
#	but are nevertheless used by many sites. Set this to off if you want
#	Squid to be strict about the standard.
#	This check is performed only when check_hostnames is set to on.
#Default:
# allow_underscore on

#  TAG: dns_retransmit_interval
#	Initial retransmit interval for DNS queries. The interval is
#	doubled each time all configured DNS servers have been tried.
#Default:
# dns_retransmit_interval 5 seconds

#  TAG: dns_timeout
#	DNS Query timeout. If no response is received to a DNS query
#	within this time all DNS servers for the queried domain
#	are assumed to be unavailable.
#Default:
# dns_timeout 30 seconds

#  TAG: dns_packet_max
#	Maximum packet size, in bytes, to advertise via EDNS.
#	Set to "none" to disable EDNS large packet support.
#	
#	For legacy reasons DNS UDP replies will default to 512 bytes which
#	is too small for many responses. EDNS provides a means for Squid to
#	negotiate receiving larger responses back immediately without having
#	to failover with repeat requests. Responses larger than this limit
#	will retain the old behaviour of failover to TCP DNS.
#	
#	Squid has no real fixed limit internally, but allowing packet sizes
#	over 1500 bytes requires network jumbogram support and is usually not
#	necessary.
#	
#	WARNING: The RFC also indicates that some older resolvers will reply
#	with failure of the whole request if the extension is added. Some
#	resolvers have already been identified which will reply with a
#	mangled EDNS response on occasion, usually in response to many-KB
#	jumbogram sizes being advertised by Squid.
#	Squid currently treats both cases as an unable-to-resolve domain,
#	even if the domain would be resolvable without EDNS.
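#Example:
## a sketch: advertise EDNS responses up to 1440 bytes, staying under
## the 1500-byte jumbogram threshold mentioned above
#dns_packet_max 1440 bytes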
#Default:
# EDNS disabled

#  TAG: dns_defnames	on|off
#	Normally the RES_DEFNAMES resolver option is disabled
#	(see res_init(3)).  This prevents caches in a hierarchy
#	from interpreting single-component hostnames locally.  To allow
#	Squid to handle single-component names, enable this option.
#Default:
# Search for single-label domain names is disabled.

#  TAG: dns_multicast_local	on|off
#	When set to on, Squid sends multicast DNS lookups on the local
#	network for domains ending in .local and .arpa.
#	This enables local servers and devices to be contacted in an
#	ad-hoc or zero-configuration network environment.
#Default:
# Search for .local and .arpa names is disabled.

#  TAG: dns_nameservers
#	Use this if you want to specify a list of DNS name servers
#	(IP addresses) to use instead of those given in your
#	/etc/resolv.conf file.
#
#	On Windows platforms, if no value is specified here or in
#	the /etc/resolv.conf file, the list of DNS name servers is
#	taken from the Windows registry; both static and dynamic DHCP
#	configurations are supported.
#
#	Example: dns_nameservers 10.0.0.1 192.172.0.4
#Default:
# Use operating system definitions

#  TAG: hosts_file
#	Location of the host-local IP name-address associations
#	database. Most Operating Systems have such a file in different
#	default locations:
#	- Un*X & Linux:    /etc/hosts
#	- Windows NT/2000: %SystemRoot%\system32\drivers\etc\hosts
#			   (%SystemRoot% value install default is c:\winnt)
#	- Windows XP/2003: %SystemRoot%\system32\drivers\etc\hosts
#			   (%SystemRoot% value install default is c:\windows)
#	- Windows 9x/Me:   %windir%\hosts
#			   (%windir% value is usually c:\windows)
#	- Cygwin:	   /etc/hosts
#
#	The file contains newline-separated definitions, in the form:
#	ip_address_in_dotted_form name [name ...]. Names are
#	whitespace-separated. Lines beginning with a hash (#)
#	character are comments.
#
#	The file is checked at startup and upon configuration.
#	If set to 'none', it won't be checked.
#	If append_domain is used, that domain will be added to
#	domain-local (i.e. not containing any dot character) host
#	definitions.
#Default:
# hosts_file /etc/hosts

#  TAG: append_domain
#	Appends local domain name to hostnames without any dots in
#	them.  append_domain must begin with a period.
#
#	Be warned there are now Internet names consisting only of a
#	top-level domain name, so setting this may
#	cause some Internet sites to become unavailable.
#
#Example:
# append_domain .yourdomain.com
#Default:
# Use operating system definitions

#  TAG: ignore_unknown_nameservers
#	By default Squid checks that DNS responses are received
#	from the same IP addresses they are sent to.  If they
#	don't match, Squid ignores the response and writes a warning
#	message to cache.log.  You can allow responses from unknown
#	nameservers by setting this option to 'off'.
#Default:
# ignore_unknown_nameservers on

#  TAG: dns_v4_first
#	With the IPv6 Internet being as fast or faster than the IPv4
#	Internet for most networks, Squid prefers to contact websites over IPv6.
#
#	This option reverses the order of preference to make Squid contact
#	dual-stack websites over IPv4 first. Squid will still perform both
#	IPv6 and IPv4 DNS lookups before connecting.
#
#	WARNING:
#	  This option will restrict the situations under which IPv6
#	  connectivity is used (and tested), potentially hiding network
#	  problems which would otherwise be detected and warned about.
#Default:
# dns_v4_first off

#  TAG: ipcache_size	(number of entries)
#	Maximum number of DNS IP cache entries.
#Default:
# ipcache_size 1024

#  TAG: ipcache_low	(percent)
#	The low-water mark for the IP cache.
#Default:
# ipcache_low 90

#  TAG: ipcache_high	(percent)
#	The high-water mark for the IP cache.
#Default:
# ipcache_high 95

#  TAG: fqdncache_size	(number of entries)
#	Maximum number of FQDN cache entries.
#Default:
# fqdncache_size 1024

# MISCELLANEOUS
# -----------------------------------------------------------------------------

#  TAG: configuration_includes_quoted_values	on|off
#	If set, Squid will recognize each "quoted string" after a configuration
#	directive as a single parameter. The quotes are stripped before the
#	parameter value is interpreted or used.
#	See "Values with spaces, quotes, and other special characters"
#	section for more details.
#Default:
# configuration_includes_quoted_values off

#  TAG: memory_pools	on|off
#	If set, Squid will keep pools of allocated (but unused) memory
#	available for future use.  If memory is at a premium on your
#	system and you believe your malloc library outperforms Squid
#	routines, disable this.
#Default:
# memory_pools on

#  TAG: memory_pools_limit	(bytes)
#	Used only with memory_pools on:
#	memory_pools_limit 50 MB
#
#	If set to a non-zero value, Squid will keep at most the specified
#	limit of allocated (but unused) memory in memory pools. All free()
#	requests that exceed this limit will be handled by your malloc
#	library. Squid does not pre-allocate any memory, just safe-keeps
#	objects that otherwise would be free()d. Thus, it is safe to set
#	memory_pools_limit to a reasonably high value even if your
#	configuration will use less memory.
#
#	If set to none, Squid will keep all memory it can. That is, there
#	will be no limit on the total amount of memory used for safe-keeping.
#
#	To disable memory allocation optimization, do not set
#	memory_pools_limit to 0 or none. Set memory_pools to "off" instead.
#
#	An overhead for maintaining memory pools is not taken into account
#	when the limit is checked. This overhead is close to four bytes per
#	object kept. However, pools may actually _save_ memory because of
#	reduced memory thrashing in your malloc library.
#Default:
# memory_pools_limit 5 MB

#  TAG: forwarded_for	on|off|transparent|truncate|delete
#	If set to "on", Squid will append your client's IP address
#	in the HTTP requests it forwards. By default it looks like:
#
#		X-Forwarded-For: 192.1.2.3
#
#	If set to "off", it will appear as
#
#		X-Forwarded-For: unknown
#
#	If set to "transparent", Squid will not alter the
#	X-Forwarded-For header in any way.
#
#	If set to "delete", Squid will delete the entire
#	X-Forwarded-For header.
#
#	If set to "truncate", Squid will remove all existing
#	X-Forwarded-For entries, and place the client IP as the sole entry.
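#Example:
## a sketch: for privacy, remove the X-Forwarded-For header entirely
#forwarded_for delete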
#Default:
# forwarded_for on

#  TAG: cachemgr_passwd
#	Specify passwords for cachemgr operations.
#
#	Usage: cachemgr_passwd password action action ...
#
#	Some valid actions are (see cache manager menu for a full list):
#		5min
#		60min
#		asndb
#		authenticator
#		cbdata
#		client_list
#		comm_incoming
#		config *
#		counters
#		delay
#		digest_stats
#		dns
#		events
#		filedescriptors
#		fqdncache
#		histograms
#		http_headers
#		info
#		io
#		ipcache
#		mem
#		menu
#		netdb
#		non_peers
#		objects
#		offline_toggle *
#		pconn
#		peer_select
#		reconfigure *
#		redirector
#		refresh
#		server_list
#		shutdown *
#		store_digest
#		storedir
#		utilization
#		via_headers
#		vm_objects
#
#	* Indicates actions which will not be performed without a
#	  valid password; others can be performed if not listed here.
#
#	To disable an action, set the password to "disable".
#	To allow performing an action without a password, set the
#	password to "none".
#
#	Use the keyword "all" to set the same password for all actions.
#
#Example:
# cachemgr_passwd secret shutdown
# cachemgr_passwd lesssssssecret info stats/objects
# cachemgr_passwd disable all
#Default:
# No password. Actions which require password are denied.

#  TAG: client_db	on|off
#	If you want to disable collecting per-client statistics,
#	turn off client_db here.
#Default:
# client_db on

#  TAG: refresh_all_ims	on|off
#	When you enable this option, squid will always check
#	the origin server for an update when a client sends an
#	If-Modified-Since request.  Many browsers use IMS
#	requests when the user requests a reload, and this
#	ensures those clients receive the latest version.
#
#	By default (off), squid may return a Not Modified response
#	based on the age of the cached version.
#Default:
# refresh_all_ims off

#  TAG: reload_into_ims	on|off
#	When you enable this option, client no-cache or ``reload''
#	requests will be changed to If-Modified-Since requests.
#	Doing this VIOLATES the HTTP standard.  Enabling this
#	feature could make you liable for problems which it
#	causes.
#
#	see also refresh_pattern for a more selective approach.
#Default:
# reload_into_ims off

#  TAG: connect_retries
#	This sets the maximum number of connection attempts made for each
#	TCP connection. The connect_retries attempts must all still
#	complete within the connection timeout period.
#
#	The default is not to re-try if the first connection attempt fails.
#	The (not recommended) maximum is 10 tries.
#
#	A warning message will be generated if it is set too high
#	and the configured value will be overridden.
#
#	Note: These re-tries are in addition to forward_max_tries
#	which limits how many different addresses may be tried to find
#	a useful server.
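#Example:
## a sketch: allow up to 3 connection attempts per address
## (3 is an illustrative value well under the maximum of 10)
#connect_retries 3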
#Default:
# Do not retry failed connections.

#  TAG: retry_on_error
#	If set to ON, Squid will automatically retry requests when
#	receiving an error response with status 403 (Forbidden),
#	500 (Internal Error), 501 or 503 (Service not available).
#	Status 502 and 504 (Gateway errors) are always retried.
#	
#	This is mainly useful if you are in a complex cache hierarchy to
#	work around access control errors.
#	
#	NOTE: This retry will attempt to find another working destination,
#	which is different from the server that just failed.
#Default:
# retry_on_error off

#  TAG: as_whois_server
#	WHOIS server to query for AS numbers.  NOTE: AS numbers are
#	queried only when Squid starts up, not for every request.
#Default:
# as_whois_server whois.ra.net

#  TAG: offline_mode
#	Enable this option and Squid will never try to validate cached
#	objects.
#Default:
# offline_mode off

#  TAG: uri_whitespace
#	What to do with requests that have whitespace characters in the
#	URI.  Options:
#
#	strip:  The whitespace characters are stripped out of the URL.
#		This is the behavior recommended by RFC2396 and RFC3986
#		for tolerant handling of generic URI.
#		NOTE: This is one difference between generic URI and HTTP URLs.
#
#	deny:   The request is denied.  The user receives an "Invalid
#		Request" message.
#		This is the behaviour recommended by RFC2616 for safe
#		handling of HTTP request URL.
#
#	allow:  The request is allowed and the URI is not changed.  The
#		whitespace characters remain in the URI.  Note the
#		whitespace is passed to redirector processes if they
#		are in use.
#		Note this may be considered a violation of RFC2616
#		request parsing where whitespace is prohibited in the
#		URL field.
#
#	encode:	The request is allowed and the whitespace characters are
#		encoded according to RFC1738.
#
#	chop:	The request is allowed and the URI is chopped at the
#		first whitespace.
#
#
#	NOTE the current Squid implementation of encode and chop violates
#	RFC2616 by not using a 301 redirect after altering the URL.
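#Example:
## a sketch: take the strict RFC 2616 route and reject such requests
#uri_whitespace deny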
#Default:
# uri_whitespace strip

#  TAG: chroot
#	Specifies a directory where Squid should do a chroot() while
#	initializing.  This also causes Squid to fully drop root
#	privileges after initializing.  This means, for example, if you
#	use an HTTP port less than 1024 and try to reconfigure, you may
#	get an error saying that Squid cannot open the port.
#Default:
# none

#  TAG: balance_on_multiple_ip
#	Modern IP resolvers in squid sort lookup results by preferred access.
#	By default squid will use these IPs in order and only rotates to
#	the next listed when the most preferred fails.
#
#	Some load balancing servers based on round robin DNS have been
#	found not to preserve user session state across requests
#	to different IP addresses.
#
#	Enabling this directive makes Squid rotate IPs per request.
#Default:
# balance_on_multiple_ip off

#  TAG: pipeline_prefetch
#	HTTP clients may send a pipeline of 1+N requests to Squid using a
#	single connection, without waiting for Squid to respond to the first
#	of those requests. This option limits the number of concurrent
#	requests Squid will try to handle in parallel. If set to N, Squid
#	will try to receive and process up to 1+N requests on the same
#	connection concurrently.
#
#	Defaults to 0 (off) for bandwidth management and access logging
#	reasons.
#
#	NOTE: pipelining requires persistent connections to clients.
#
#	WARNING: pipelining breaks NTLM and Negotiate/Kerberos authentication.
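#Example:
## a sketch: allow one extra request to be parsed ahead (1+1 concurrent)
#pipeline_prefetch 1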
#Default:
# Do not pre-parse pipelined requests.

#  TAG: high_response_time_warning	(msec)
#	If the one-minute median response time exceeds this value,
#	Squid prints a WARNING with debug level 0 to get the
#	administrator's attention.  The value is in milliseconds.
#Default:
# disabled.

#  TAG: high_page_fault_warning
#	If the one-minute average page fault rate exceeds this
#	value, Squid prints a WARNING with debug level 0 to get
#	the administrator's attention.  The value is in page faults
#	per second.
#Default:
# disabled.

#  TAG: high_memory_warning
# Note: This option is only available if Squid is rebuilt with
#       GNU Malloc with mstats() support
#
#	If the memory usage (as determined by gnumalloc, if available and used)
#	exceeds this amount, Squid prints a WARNING with debug level 0 to get
#	the administrator's attention.
#Default:
# disabled.

#  TAG: sleep_after_fork	(microseconds)
#	When this is set to a non-zero value, the main Squid process
#	sleeps the specified number of microseconds after a fork()
#	system call. This sleep may help the situation where your
#	system reports fork() failures due to lack of (virtual)
#	memory. Note, however, if you have a lot of child
#	processes, these sleep delays will add up and your
#	Squid will not service requests for some amount of time
#	until all the child processes have been started.
#	On Windows, values less than 1000 (1 millisecond) are
#	rounded up to 1000.
#Default:
# sleep_after_fork 0

#  TAG: windows_ipaddrchangemonitor	on|off
# Note: This option is only available if Squid is built for
#       MS Windows
#
#	On Windows Squid by default will monitor IP address changes and will
#	reconfigure itself after any detected event. This is very useful for
#	proxies connected to the Internet with dial-up interfaces.
#	In some cases (a proxy server acting as a VPN gateway is one) it may
#	be desirable to disable this behaviour by setting this to 'off'.
#	Note: after changing this, the Squid service must be restarted.
#Default:
# windows_ipaddrchangemonitor on

#  TAG: eui_lookup
#	Whether to look up the EUI or MAC address of a connected client.
#Default:
# eui_lookup on

#  TAG: max_filedescriptors
#	Reduce the maximum number of filedescriptors supported below
#	the usual operating system defaults.
#
#	Remove from squid.conf to inherit the current ulimit setting.
#
#	Note: Changing this requires a restart of Squid. Also,
#	not all I/O types support large values (e.g. on Windows).
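#Example:
## a sketch: cap Squid at 4096 descriptors (illustrative; must not
## exceed the ulimit value inherited from the operating system)
#max_filedescriptors 4096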
#Default:
# Use operating system limits set by ulimit.

ini Passenger

.conf
http {
    ...
    passenger_root /home/ubuntu/.rvm/gems/ruby-2.4.2/gems/passenger-6.0.0;
    passenger_ruby /home/ubuntu/.rvm/gems/ruby-2.4.2/wrappers/ruby;
    ...
}

# these directives go inside the server block serving the app
# (see the sketch below):
root /home/ubuntu/landlord-webadmn/public;
client_max_body_size 4G;
passenger_enabled on;
rails_env production;
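# a minimal sketch of the enclosing server block; the listen and
# server_name values are placeholder assumptions, not from the
# original config:
# server {
#     listen 80;
#     server_name example.com;
#     # ... the four directives above go here ...
# }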

ini my tmux conf

my tmux conf

.tmux.conf
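# remap the prefix from C-b to C-a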
unbind C-b
set-option -g prefix C-a
bind-key C-a send-prefix

# switch panes using Alt-arrow without prefix
bind -n M-Left select-pane -L
bind -n M-Right select-pane -R
bind -n M-Up select-pane -U
bind -n M-Down select-pane -D

setw -g monitor-activity on
set -g visual-activity on

set-window-option -g window-status-current-bg yellow
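# NOTE: window-status-current-bg was removed in tmux 2.9; newer
# versions use the equivalent:
#   setw -g window-status-current-style bg=yellow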

# Enable mouse mode (tmux 2.1 and above)
set -g mouse on
bind -t vi-copy y copy-pipe "xclip -sel clip -i"
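# NOTE: "bind -t vi-copy" is pre-2.4 syntax and fails on tmux >= 2.4;
# the equivalent there (still assuming xclip is installed) is:
#   bind -T copy-mode-vi y send-keys -X copy-pipe-and-cancel "xclip -sel clip -i"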
set -g default-terminal "screen-256color"