components/openstack/swift/files/proxy-server.conf
changeset 1944 56ac2df1785b
parent 1896 f83e6dde6c3b
child 3998 5bd484384122
--- a/components/openstack/swift/files/proxy-server.conf	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/proxy-server.conf	Wed Jun 11 17:13:12 2014 -0700
@@ -4,37 +4,59 @@
 # bind_timeout = 30
 # backlog = 4096
 # swift_dir = /etc/swift
-# workers = 1
 # user = swift
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections.  Should default to the number of effective cpu
+# cores in the system.  It's worth noting that individual workers will
+# use many eventlet co-routines to service multiple concurrent requests.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
 # Set the following two lines to enable SSL. This is for testing only.
 # cert_file = /etc/swift/proxy.crt
 # key_file = /etc/swift/proxy.key
+#
 # expiring_objects_container_divisor = 86400
+#
 # You can specify default log routing here if you want:
 # log_name = swift
 # log_facility = LOG_LOCAL0
 # log_level = INFO
-# log_headers = False
+# log_headers = false
 # log_address = /dev/log
+#
+# This optional suffix (default is empty) is appended to the swift transaction
+# id, allowing one to easily figure out to which cluster a given X-Trans-Id belongs.
+# This is very useful when one is managing more than one swift cluster.
+# trans_id_suffix =
+#
 # comma separated list of functions to call to setup custom log handlers.
 # functions get passed: conf, name, log_to_console, log_route, fmt, logger,
 # adapted_logger
 # log_custom_handlers =
+#
 # If set, log_udp_host will override log_address
 # log_udp_host =
 # log_udp_port = 514
+#
 # You can enable StatsD logging here:
 # log_statsd_host = localhost
 # log_statsd_port = 8125
 # log_statsd_default_sample_rate = 1.0
 # log_statsd_sample_rate_factor = 1.0
 # log_statsd_metric_prefix =
+#
 # Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
 # cors_allow_origin =
+#
+# client_timeout = 60
 # eventlet_debug = false
 
 [pipeline:main]
-pipeline = catch_errors healthcheck proxy-logging cache slo ratelimit tempauth authtoken keystoneauth container-quotas account-quotas proxy-logging proxy-server
+pipeline = catch_errors healthcheck proxy-logging cache bulk slo ratelimit tempauth authtoken keystoneauth container-quotas account-quotas proxy-logging proxy-server
 
 [app:proxy-server]
 use = egg:swift#proxy
@@ -43,61 +65,123 @@
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
 # set log_address = /dev/log
-# log_handoffs = True
+#
+# log_handoffs = true
 # recheck_account_existence = 60
 # recheck_container_existence = 60
 # object_chunk_size = 8192
 # client_chunk_size = 8192
 # node_timeout = 10
-# client_timeout = 60
 # conn_timeout = 0.5
+#
 # How long without an error before a node's error count is reset. This will
 # also be how long before a node is reenabled after suppression is triggered.
 # error_suppression_interval = 60
+#
 # How many errors can accumulate before a node is temporarily ignored.
 # error_suppression_limit = 10
+#
 # If set to 'true' any authorized user may create and delete accounts; if
 # 'false' no one, even authorized, can.
 # allow_account_management = false
+#
 # Set object_post_as_copy = false to turn on fast posts where only the metadata
 # changes are stored anew and the original data file is kept in place. This
 # makes for quicker posts; but since the container metadata isn't updated in
 # this mode, features like container sync won't be able to sync posts.
 # object_post_as_copy = true
+#
 # If set to 'true' authorized accounts that do not yet exist within the Swift
 # cluster will be automatically created.
 account_autocreate = true
+#
 # If set to a positive value, trying to create a container when the account
 # already has at least this maximum containers will result in a 403 Forbidden.
 # Note: This is a soft limit, meaning a user might exceed the cap for
 # recheck_account_existence before the 403s kick in.
 # max_containers_per_account = 0
+#
 # This is a comma separated list of account hashes that ignore the
 # max_containers_per_account cap.
 # max_containers_whitelist =
+#
 # Comma separated list of Host headers to which the proxy will deny requests.
 # deny_host_headers =
+#
 # Prefix used when automatically creating accounts.
 # auto_create_account_prefix = .
+#
 # Depth of the proxy put queue.
 # put_queue_depth = 10
+#
 # Start rate-limiting object segment serving after the Nth segment of a
 # segmented object.
 # rate_limit_after_segment = 10
+#
 # Once segment rate-limiting kicks in for an object, limit segments served
 # to N per second.
 # rate_limit_segments_per_sec = 1
-# Storage nodes can be chosen at random (shuffle) or by using timing
-# measurements. Using timing measurements may allow for lower overall latency.
-# The valid values for sorting_method are "shuffle" and "timing"
+#
+# Storage nodes can be chosen at random (shuffle), by using timing
+# measurements (timing), or by using an explicit match (affinity).
+# Using timing measurements may allow for lower overall latency, while
+# using affinity allows for finer control. In both the timing and
+# affinity cases, equally-sorting nodes are still randomly chosen to
+# spread load.
+# The valid values for sorting_method are "affinity", "shuffle", and "timing".
 # sorting_method = shuffle
-# If the timing sorting_method is used, the timings will only be valid for
+#
+# If the "timing" sorting_method is used, the timings will only be valid for
 # the number of seconds configured by timing_expiry.
 # timing_expiry = 300
+#
 # If set to false will treat objects with X-Static-Large-Object header set
 # as a regular object on GETs, i.e. will return that object's contents. Should
 # be set to false if slo is not used in pipeline.
 # allow_static_large_object = true
+#
+# The maximum time (seconds) that a large object connection is allowed to last.
+# max_large_object_get_time = 86400
+#
+# Set to the number of nodes to contact for a normal request. You can use
+# '* replicas' at the end to have it use the number given times the number of
+# replicas for the ring being used for the request.
+# request_node_count = 2 * replicas
+#
+# Which backend servers to prefer on reads. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. The value after the equals is
+# the priority; lower numbers are higher priority.
+#
+# Example: first read from region 1 zone 1, then region 1 zone 2, then
+# anything in region 2, then everything else:
+# read_affinity = r1z1=100, r1z2=200, r2=300
+# Default is empty, meaning no preference.
+# read_affinity =
+#
+# Which backend servers to prefer on writes. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. If this is set, then when
+# handling an object PUT request, some number (see setting
+# write_affinity_node_count) of local backend servers will be tried
+# before any nonlocal ones.
+#
+# Example: try to write to regions 1 and 2 before writing to any other
+# nodes:
+# write_affinity = r1, r2
+# Default is empty, meaning no preference.
+# write_affinity =
+#
+# The number of local (as governed by the write_affinity setting)
+# nodes to attempt to contact first, before any non-local ones. You
+# can use '* replicas' at the end to have it use the number given
+# times the number of replicas for the ring being used for the
+# request.
+# write_affinity_node_count = 2 * replicas
+#
+# These are the headers whose values will only be shown to swift_owners. The
+# exact definition of a swift_owner is up to the auth system in use, but
+# usually indicates administrative responsibilities.
+# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2
+
 
 [filter:tempauth]
 use = egg:swift#tempauth
@@ -105,26 +189,31 @@
 # set log_name = tempauth
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
+#
 # The reseller prefix will verify a token begins with this prefix before even
 # attempting to validate it. Also, with authorization, only Swift storage
 # accounts with this prefix will be authorized by this middleware. Useful if
 # multiple auth systems are in use for one Swift cluster.
 # reseller_prefix = AUTH
+#
 # The auth prefix will cause requests beginning with this prefix to be routed
 # to the auth subsystem, for granting tokens, etc.
 # auth_prefix = /auth/
 # token_life = 86400
+#
 # This allows middleware higher in the WSGI pipeline to override auth
 # processing, useful for middleware such as tempurl and formpost. If you know
 # you're not going to use such middleware and you want a bit of extra security,
 # you can set this to false.
 # allow_overrides = true
+#
 # This specifies what scheme to return with storage urls:
 # http, https, or default (chooses based on what the server is running as)
 # This can be useful with an SSL load balancer in front of a non-SSL server.
 # storage_url_scheme = default
+#
 # Lastly, you need to list all the accounts/users you want here. The format is:
 #   user_<account>_<user> = <key> [group] [group] [...] [storage_url]
 # or if you want underscores in <account> or <user>, you can base64 encode them
@@ -152,14 +241,12 @@
 #
 # You'll need to have as well the keystoneauth middleware enabled
 # and have it in your main pipeline so instead of having tempauth in
-# there you can change it to: authtoken keystone
+# there you can change it to: authtoken keystoneauth
 #
 [filter:authtoken]
 paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
-auth_host = 127.0.0.1
-auth_port = 35357
-auth_protocol = http
 auth_uri = http://127.0.0.1:5000/
+identity_uri = http://127.0.0.1:35357
 admin_tenant_name = %SERVICE_TENANT_NAME%
 admin_user = %SERVICE_USER%
 admin_password = %SERVICE_PASSWORD%
@@ -173,6 +260,8 @@
 # Operator roles is the role which user would be allowed to manage a
 # tenant and be able to create container or give ACL to others.
 # operator_roles = admin, swiftoperator
+# The reseller admin role has the ability to create and delete accounts.
+# reseller_admin_role = ResellerAdmin
 
 [filter:healthcheck]
 use = egg:swift#healthcheck
@@ -189,8 +278,9 @@
 # set log_name = cache
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
+#
 # If not set here, the value for memcache_servers will be read from
 # memcache.conf (see memcache.conf-sample) or lacking that file, it will
 # default to the value below. You can specify multiple servers separated with
@@ -215,18 +305,23 @@
 # set log_name = ratelimit
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
+#
 # clock_accuracy should represent how accurate the proxy servers' system clocks
 # are with each other. 1000 means that all the proxies' clock are accurate to
 # each other within 1 millisecond.  No ratelimit should be higher than the
 # clock accuracy.
 # clock_accuracy = 1000
+#
 # max_sleep_time_seconds = 60
+#
 # log_sleep_time_seconds of 0 means disabled
 # log_sleep_time_seconds = 0
+#
 # allows for slow rates (e.g. running up to 5 sec's behind) to catch up.
 # rate_buffer_seconds = 5
+#
 # account_ratelimit of 0 means disabled
 # account_ratelimit = 0
 
@@ -235,21 +330,28 @@
 # account_blacklist = c,d
 
 # with container_limit_x = r
-# for containers of size x limit requests per second to r.  The container
+# for containers of size x limit write requests per second to r.  The container
 # rate will be linearly interpolated from the values given. With the values
 # below, a container of size 5 will get a rate of 75.
 # container_ratelimit_0 = 100
 # container_ratelimit_10 = 50
 # container_ratelimit_50 = 20
 
+# Similarly to the above container-level write limits, the following will limit
+# container GET (listing) requests.
+# container_listing_ratelimit_0 = 100
+# container_listing_ratelimit_10 = 50
+# container_listing_ratelimit_50 = 20
+
 [filter:domain_remap]
 use = egg:swift#domain_remap
 # You can override the default log routing for this filter here:
 # set log_name = domain_remap
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
+#
 # storage_domain = example.com
 # path_root = v1
 # reseller_prefixes = AUTH
@@ -260,7 +362,7 @@
 # set log_name = catch_errors
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
 
 [filter:cname_lookup]
@@ -270,20 +372,21 @@
 # set log_name = cname_lookup
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
+#
 # storage_domain = example.com
 # lookup_depth = 1
 
 # Note: Put staticweb just after your auth filter(s) in the pipeline
 [filter:staticweb]
 use = egg:swift#staticweb
-# Seconds to cache container x-container-meta-web-* header values.
-# cache_timeout = 300
 
 # Note: Put tempurl just before your auth filter(s) in the pipeline
 [filter:tempurl]
 use = egg:swift#tempurl
+# The methods allowed with Temp URLs.
+# methods = GET HEAD PUT
 #
 # The headers to remove from incoming requests. Simply a whitespace delimited
 # list of header names and names can optionally end with '*' to indicate a
@@ -329,19 +432,35 @@
 # access_log_facility = LOG_LOCAL0
 # access_log_level = INFO
 # access_log_address = /dev/log
+#
 # If set, access_log_udp_host will override access_log_address
 # access_log_udp_host =
 # access_log_udp_port = 514
+#
 # You can use log_statsd_* from [DEFAULT] or override them here:
 # access_log_statsd_host = localhost
 # access_log_statsd_port = 8125
 # access_log_statsd_default_sample_rate = 1.0
 # access_log_statsd_sample_rate_factor = 1.0
 # access_log_statsd_metric_prefix =
-# access_log_headers = False
+# access_log_headers = false
+#
+# By default, the X-Auth-Token is logged. To obscure the value,
+# set reveal_sensitive_prefix to the number of characters to log.
+# For example, if set to 12, only the first 12 characters of the
+# token appear in the log. An unauthorized access of the log file
+# won't allow unauthorized usage of the token. However, the first
+# 12 or so characters is unique enough that you can trace/debug
+# token usage. Set to 0 to suppress the token completely (replaced
+# by '...' in the log).
+# Note: reveal_sensitive_prefix will not affect the value
+# logged with access_log_headers=True.
+# reveal_sensitive_prefix = 8192
+#
 # What HTTP methods are allowed for StatsD logging (comma-sep); request methods
 # not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
 # log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
+#
 # Note: The double proxy-logging in the pipeline is not a mistake. The
 # left-most proxy-logging is there to log requests that were handled in
 # middleware and never made it through to the right-most middleware (and
@@ -352,8 +471,9 @@
 [filter:bulk]
 use = egg:swift#bulk
 # max_containers_per_extraction = 10000
-# max_failed_files = 1000
-# max_deletes_per_request = 1000
+# max_failed_extractions = 1000
+# max_deletes_per_request = 10000
+# yield_frequency = 60
 
 # Note: Put after auth in the pipeline.
 [filter:container-quotas]