@@ -0,0 +1,720 @@
+; Upgrading CouchDB will overwrite this file.
+[vendor]
+name = The Apache Software Foundation
+
+[couchdb]
+uuid =
+database_dir = ./data
+view_index_dir = ./data
+; util_driver_dir =
+; plugin_dir =
+;os_process_timeout = 5000 ; 5 seconds, for view servers.
+
+; Maximum number of .couch files to open at once.
+; The actual limit may be slightly lower depending on how
+; many schedulers you have as the allowance is divided evenly
+; among them.
+;max_dbs_open = 500
+
+; Method used to compress everything that is appended to database and view index files, except
+; for attachments (see the attachments section). Available methods are:
+;
+; none - no compression
+; snappy - use google snappy, a very fast compressor/decompressor
+; deflate_N - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
+; lowest compression ratio) to 9 (slowest, highest compression ratio)
+;file_compression = snappy
+; Higher values may give better read performance due to fewer read operations
+; and/or more OS page cache hits, but they can also increase overall response
+; time for writes when there are many attachment write requests in parallel.
+;attachment_stream_buffer_size = 4096
+; Default security object for databases if not explicitly set
+; everyone - same as couchdb 1.0, everyone can read/write
+; admin_only - only admins can read/write
+; admin_local - sharded dbs on :5984 are read/write for everyone,
+; local dbs on :5986 are read/write for admins only
+;default_security = admin_only
+; btree_chunk_size = 1279
+; maintenance_mode = false
+; stem_interactive_updates = true
+; uri_file =
+; The speed of processing the _changes feed with doc_ids filter can be
+; influenced directly with this setting - increase for faster processing at the
+; expense of more memory usage.
+;changes_doc_ids_optimization_threshold = 100
+; Maximum document ID length. Can be set to an integer or 'infinity'.
+;max_document_id_length = infinity
+;
+; Limit maximum document size. Requests to create / update documents with a body
+; size larger than this will fail with a 413 http error. This limit applies to
+; requests which update a single document as well as individual documents from
+; a _bulk_docs request. The size limit is approximate due to the nature of JSON
+; encoding.
+;max_document_size = 8000000 ; bytes
+;
+; Maximum attachment size.
+; max_attachment_size = 1073741824 ; 1 gibibyte
+;
+; Do not update the least recently used DB cache on reads, only writes
+;update_lru_on_read = false
+;
+; The default storage engine to use when creating databases
+; is set as a key into the [couchdb_engines] section.
+;default_engine = couch
+;
+; Enable this to only "soft-delete" databases when DELETE /{db} requests are
+; made. This will place a .recovery directory in your data directory and
+; move deleted databases/shards there instead. You can then manually delete
+; these files later, as desired.
+;enable_database_recovery = false
+;
+; Set the maximum size allowed for a partition. This helps users avoid
+; inadvertently abusing partitions resulting in hot shards. The default
+; is 10GiB. A value of 0 or less will disable partition size checks.
+;max_partition_size = 10737418240
+;
+; When true, system databases _users and _replicator are created immediately
+; on startup if not present.
+;single_node = false
+
+; Allow edits on the _security object in the user db. By default, it's disabled.
+;users_db_security_editable = false
+
+[purge]
+; Allowed maximum number of documents in one purge request
+;max_document_id_number = 100
+;
+; Allowed maximum number of accumulated revisions in one purge request
+;max_revisions_number = 1000
+;
+; Allowed duration when the index is not updated for the local purge checkpoint
+; document. Default is 24 hours.
+;index_lag_warn_seconds = 86400
+
+[couchdb_engines]
+; The keys in this section are the filename extension that
+; the specified engine module will use. This is important so
+; that couch_server is able to find an existing database without
+; having to ask every configured engine.
+couch = couch_bt_engine
+
+[process_priority]
+; Selectively disable altering process priorities for modules that request it.
+; * NOTE: couch_server priority has been shown to lead to CouchDB hangs and
+; failures on Erlang releases 21.0 - 21.3.8.12 and 22.0 - 22.2.4. Do not
+; enable when running with those versions.
+;couch_server = false
+
+[cluster]
+;q=2
+;n=3
+; placement = metro-dc-a:2,metro-dc-b:1
+
+; Supply a comma-delimited list of node names that this node should
+; contact in order to join a cluster. If a seedlist is configured the ``_up``
+; endpoint will return a 404 until the node has successfully contacted at
+; least one of the members of the seedlist and replicated an up-to-date copy
+; of the ``_nodes``, ``_dbs``, and ``_users`` system databases.
+; seedlist = couchdb@node1.example.com,couchdb@node2.example.com
+
+[chttpd]
+; These settings affect the main, clustered port (5984 by default).
+port = 5984
+bind_address = 127.0.0.1
+;backlog = 512
+;socket_options = [{sndbuf, 262144}, {nodelay, true}]
+;server_options = [{recbuf, undefined}]
+;require_valid_user = false
+; require_valid_user_except_for_up = false
+; List of headers that will be kept when the header Prefer: return=minimal is included in a request.
+; If the Server header is left out, Mochiweb will add its own.
+;prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary
+;
+; Limit the maximum number of databases when trying to get detailed information using
+; _dbs_info in a request
+;max_db_number_for_dbs_info_req = 100
+
+; Set to true to delay the start of a response until the end has been calculated
+;buffer_response = false
+
+; authentication handlers
+; authentication_handlers = {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
+; uncomment the next line to enable proxy authentication
+; authentication_handlers = {chttpd_auth, proxy_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
+; uncomment the next line to enable JWT authentication
+; authentication_handlers = {chttpd_auth, jwt_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
+
+; prevent non-admins from accessing /_all_dbs
+; admin_only_all_dbs = true
+
+; These options were moved from [httpd]
+;secure_rewrites = true
+;allow_jsonp = false
+
+;enable_cors = false
+;enable_xframe_options = false
+
+; CouchDB can optionally enforce a maximum uri length:
+;max_uri_length = 8000
+
+;changes_timeout = 60000
+;config_whitelist =
+;rewrite_limit = 100
+;x_forwarded_host = X-Forwarded-Host
+;x_forwarded_proto = X-Forwarded-Proto
+;x_forwarded_ssl = X-Forwarded-Ssl
+
+; Maximum allowed http request size. Applies to both clustered and local port.
+;max_http_request_size = 4294967296 ; 4GB
+
+; Set to true to decode + to space in db and doc_id parts.
+; decode_plus_to_space = true
+
+;[jwt_auth]
+; List of claims to validate
+; can be the name of a claim like "exp" or a tuple if the claim requires
+; a parameter
+; required_claims = exp, {iss, "IssuerNameHere"}
+; roles_claim_name = https://example.com/roles
+;
+; [jwt_keys]
+; Configure at least one key here if using the JWT auth handler.
+; If your JWT tokens do not include a "kid" attribute, use "_default"
+; as the config key, otherwise use the kid as the config key.
+; Examples
+; hmac:_default = aGVsbG8=
+; hmac:foo = aGVsbG8=
+; The config values can represent symmetric and asymmetric keys.
+; For symmetric keys, the value is base64 encoded:
+; hmac:_default = aGVsbG8= # base64-encoded form of "hello"
+; For asymmetric keys, the value is the PEM encoding of the public
+; key with newlines replaced with the escape sequence \n.
+; rsa:foo = -----BEGIN PUBLIC KEY-----\nMIIBIjAN...IDAQAB\n-----END PUBLIC KEY-----\n
+; ec:bar = -----BEGIN PUBLIC KEY-----\nMHYwEAYHK...AzztRs\n-----END PUBLIC KEY-----\n
+
+[couch_peruser]
+; If enabled, couch_peruser ensures that a private per-user database
+; exists for each document in _users. These databases are writable only
+; by the corresponding user. Databases are in the following form:
+; userdb-{hex encoded username}
+;enable = false
+; If set to true and a user is deleted, the respective database gets
+; deleted as well.
+;delete_dbs = false
+; Set a default q value for peruser-created databases that is different from
+; [cluster] q
+;q = 1
+; prefix for user databases. If you change this after user dbs have been
+; created, the existing databases won't get deleted if the associated user
+; gets deleted, because of the resulting prefix mismatch.
+;database_prefix = userdb-
+
+[httpd]
+port = 5986
+bind_address = 127.0.0.1
+;authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, undefined}, {sndbuf, 262144}, {nodelay, true}]
+;socket_options = [{sndbuf, 262144}]
+
+; These settings were moved to [chttpd]
+; secure_rewrites, allow_jsonp, enable_cors, enable_xframe_options,
+; max_uri_length, changes_timeout, config_whitelist, rewrite_limit,
+; x_forwarded_host, x_forwarded_proto, x_forwarded_ssl, max_http_request_size
+
+; [httpd_design_handlers]
+; _view =
+
+; [ioq]
+; concurrency = 10
+; ratio = 0.01
+
+[ssl]
+;port = 6984
+
+[chttpd_auth]
+;authentication_db = _users
+
+; These options were moved from [couch_httpd_auth]
+;authentication_redirect = /_utils/session.html
+;require_valid_user = false
+;timeout = 600 ; number of seconds before automatic logout
+;auth_cache_size = 50 ; size is number of cache entries
+;allow_persistent_cookies = true ; set to false to disallow persistent cookies
+;iterations = 10 ; iterations for password hashing
+;min_iterations = 1
+;max_iterations = 1000000000
+;password_scheme = pbkdf2
+; List of Erlang regular expressions, or tuples of a regular expression and an
+; optional error message. A new password must match all of them.
+; Example: [{".{10,}", "Password min length is 10 characters."}, "\\d+"]
+;password_regexp = []
+;proxy_use_secret = false
+; Comma-separated list of public fields, 404 if empty
+;public_fields =
+;secret =
+;users_db_public = false
+;cookie_domain = example.com
+; Set the SameSite cookie property for the auth cookie. If empty, the SameSite property is not set.
+;same_site =
+
+; [chttpd_auth_cache]
+; max_lifetime = 600000
+; max_objects =
+; max_size = 104857600
+
+; [mem3]
+; nodes_db = _nodes
+; shard_cache_size = 25000
+; shards_db = _dbs
+; sync_concurrency = 10
+
+; [fabric]
+; all_docs_concurrency = 10
+; changes_duration =
+; shard_timeout_factor = 2
+; shard_timeout_min_msec = 100
+; uuid_prefix_len = 7
+; request_timeout = 60000
+; all_docs_timeout = 10000
+; attachments_timeout = 60000
+; view_timeout = 3600000
+; partition_view_timeout = 3600000
+
+; [rexi]
+; buffer_count = 2000
+; server_per_node = true
+; stream_limit = 5
+;
+; Use a single message to kill a group of remote workers. This is
+; mostly an upgrade clause to allow operating in a mixed cluster of
+; 2.x and 3.x nodes. After upgrading, switch to true to save some
+; network bandwidth.
+;use_kill_all = false
+
+; [global_changes]
+; max_event_delay = 25
+; max_write_delay = 500
+; update_db = true
+
+; [view_updater]
+; min_writer_items = 100
+; min_writer_size = 16777216
+
+[couch_httpd_auth]
+; WARNING! This only affects the node-local port (5986 by default).
+; You probably want the settings under [chttpd].
+authentication_db = _users
+
+; These settings were moved to [chttpd_auth]
+; authentication_redirect, require_valid_user, timeout,
+; auth_cache_size, allow_persistent_cookies, iterations, min_iterations,
+; max_iterations, password_scheme, password_regexp, proxy_use_secret,
+; public_fields, secret, users_db_public, cookie_domain, same_site
+
+; CSP (Content Security Policy) Support
+[csp]
+;utils_enable = true
+;utils_header_value = default-src 'self'; img-src 'self'; font-src *; script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';
+;attachments_enable = true
+;attachments_header_value = sandbox
+;showlist_enable = true
+;showlist_header_value = sandbox
+
+[cors]
+;credentials = false
+; List of origins separated by a comma, * means accept all
+; Origins must include the scheme: http://example.com
+; You can't set origins: * and credentials = true at the same time.
+;origins = *
+; List of accepted headers separated by a comma
+; headers =
+; List of accepted methods
+; methods =
+
+; Configuration for a vhost
+;[cors:http://example.com]
+; credentials = false
+; List of origins separated by a comma
+; Origins must include the scheme: http://example.com
+; You can't set origins: * and credentials = true at the same time.
+;origins =
+; List of accepted headers separated by a comma
+; headers =
+; List of accepted methods
+; methods =
+
+; Configuration for the design document cache
+;[ddoc_cache]
+; The maximum size of the cache in bytes
+;max_size = 104857600 ; 100MiB
+; The period each cache entry should wait before
+; automatically refreshing in milliseconds
+;refresh_timeout = 67000
+
+[x_frame_options]
+; Setting same_origin will return X-Frame-Options: SAMEORIGIN.
+; If same_origin is set, the hosts setting is ignored
+; same_origin = true
+; Setting hosts will return X-Frame-Options: ALLOW-FROM https://example.com/
+; List of hosts separated by a comma. * means accept all
+; hosts =
+
+[native_query_servers]
+; erlang query server
+; enable_erlang_query_server = false
+
+; Changing reduce_limit to false will disable reduce_limit.
+; If you think you're hitting reduce_limit with a "good" reduce function,
+; please let us know on the mailing list so we can fine tune the heuristic.
+[query_server_config]
+; commit_freq = 5
+;reduce_limit = true
+;os_process_limit = 100
+; os_process_idle_limit = 300
+; os_process_soft_limit = 100
+; Timeout for how long a response from a busy view group server can take.
+; "infinity" is also a valid configuration value.
+;group_info_timeout = 5000
+;query_limit = 268435456
+;partition_query_limit = 268435456
+
+[mango]
+; Set to true to disable the "index all fields" text index, which can lead
+; to out of memory issues when users have documents with nested array fields.
+;index_all_disabled = false
+; Default limit value for mango _find queries.
+;default_limit = 25
+; Ratio between documents scanned and results matched that will
+; generate a warning in the _find response. Setting this to 0 disables
+; the warning.
+;index_scan_warning_threshold = 10
+
+[indexers]
+couch_mrview = true
+
+[feature_flags]
+; This enables any database to be created as a partitioned database (except system dbs).
+; Setting this to false will stop the creation of partitioned databases.
+; partitioned||allowed* = true will scope the creation of partitioned databases
+; to databases with the 'allowed' prefix.
+partitioned||* = true
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
+; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
+;algorithm = sequential
+; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
+; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
+;utc_id_suffix =
+; Maximum number of UUIDs retrievable from /_uuids in a single request
+;max_count = 1000
+
+[attachments]
+;compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+;compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+; Random jitter applied on replication job startup (milliseconds)
+;startup_jitter = 5000
+; Number of actively running replications
+;max_jobs = 500
+; Scheduling interval in milliseconds. During each reschedule cycle the
+; scheduler may start or stop up to max_churn jobs.
+;interval = 60000
+; Maximum number of replications to start and stop during rescheduling.
+;max_churn = 20
+; More worker processes can give higher network throughput but can also
+; imply more disk and network IO.
+;worker_processes = 4
+; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
+; also reduce the total amount of RAM used.
+;worker_batch_size = 500
+; Maximum number of HTTP connections per replication.
+;http_connections = 20
+; HTTP connection timeout per replication.
+; Even for very fast/reliable networks it might need to be increased if a remote
+; database is too busy.
+;connection_timeout = 30000
+; Request timeout
+;request_timeout = infinity
+; If a request fails, the replicator will retry it up to N times.
+;retries_per_request = 5
+; Use checkpoints
+;use_checkpoints = true
+; Checkpoint interval
+;checkpoint_interval = 30000
+; Some socket options that might boost performance in some scenarios:
+; {nodelay, boolean()}
+; {sndbuf, integer()}
+; {recbuf, integer()}
+; {priority, integer()}
+; See the `inet` Erlang module's man page for the full list of options.
+;socket_options = [{keepalive, true}, {nodelay, false}]
+; Path to a file containing the user's certificate.
+;cert_file = /full/path/to/server_cert.pem
+; Path to file containing user's private PEM encoded key.
+;key_file = /full/path/to/server_key.pem
+; String containing the user's password. Only used if the private keyfile is password protected.
+;password = somepassword
+; Set to true to validate peer certificates.
+;verify_ssl_certificates = false
+; File containing a list of peer trusted certificates (in the PEM format).
+;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; Maximum peer certificate depth (must be set even if certificate validation is off).
+;ssl_certificate_max_depth = 3
+; Maximum document ID length for replication.
+;max_document_id_length = infinity
+; How much time to wait before retrying after a missing doc exception. This
+; exception happens if the document was seen in the changes feed, but internal
+; replication hasn't caught up yet, and fetching the document's revisions
+; fails. This is a common scenario when the source is updated while continuous
+; replication is running. The retry period would depend on how quickly internal
+; replication is expected to catch up. In general this is an optimisation to
+; avoid crashing the whole replication job, which would consume more resources
+; and add log noise.
+;missing_doc_retry_msec = 2000
+; Wait this many seconds after startup before attaching changes listeners
+; cluster_start_period = 5
+; Re-check cluster state at least every cluster_quiet_period seconds
+; cluster_quiet_period = 60
+
+; List of replicator client authentication plugins to try. Plugins will be
+; tried in order. The first to initialize successfully will be used for that
+; particular endpoint (source or target). Normally couch_replicator_auth_noop
+; would be used at the end of the list as a "catch-all". It doesn't do anything
+; and effectively implements the previous behavior of using basic auth.
+; There are currently two plugins available:
+; couch_replicator_auth_session - use _session cookie authentication
+; couch_replicator_auth_noop - use basic authentication (previous default)
+; Currently, the new _session cookie authentication is tried first, before
+; falling back to the old basic authentication default:
+;auth_plugins = couch_replicator_auth_session,couch_replicator_auth_noop
+; To restore the old behaviour, use the following value:
+;auth_plugins = couch_replicator_auth_noop
+
+; Force couch_replicator_auth_session plugin to refresh the session
+; periodically if max-age is not present in the cookie. This is mostly to
+; handle the case where anonymous writes are allowed to the database and a VDU
+; function is used to forbid writes based on the authenticated user name. In
+; that case this value should be adjusted based on the expected minimum session
+; expiry timeout on replication endpoints. If session expiry results in a 401
+; or 403 response this setting is not needed.
+;session_refresh_interval_sec = 550
+
+; Usage coefficient decays historic fair share usage every scheduling
+; cycle. The value must be between 0.0 and 1.0. Lower values will
+; ensure historic usage decays quicker and higher values mean it will
+; be remembered longer.
+;usage_coeff = 0.5
+
+; Priority coefficient decays all the job priorities such that they slowly
+; drift towards the front of the run queue. This coefficient defines a maximum
+; time window over which this algorithm would operate. For example, if this
+; value is too small (0.1), after a few cycles quite a few jobs would end up at
+; priority 0, and would render this algorithm useless. The default value of
+; 0.98 is picked such that if a job ran for one scheduler cycle, then didn't
+; get to run for 7 hours, it would still have priority > 0. 7 hours was picked
+; as it was close enough to 8 hours, which is the default maximum error backoff
+; interval.
+;priority_coeff = 0.98
+
+
+[replicator.shares]
+; Fair share configuration section. More shares result in a higher
+; chance that jobs from that db get to run. The default value is 100,
+; minimum is 1 and maximum is 1000. The configuration may be set even
+; if the database does not exist.
+;_replicator = 100
+
+
+[log]
+; Possible log levels:
+; debug
+; info
+; notice
+; warning, warn
+; error, err
+; critical, crit
+; alert
+; emergency, emerg
+; none
+;
+;level = info
+;
+; Set the maximum log message length in bytes that will be
+; passed through the writer
+;
+; max_message_size = 16000
+;
+; Do not log the last message received by a terminated process
+; strip_last_msg = true
+;
+; List of fields to remove before logging the crash report
+; filter_fields = [pid, registered_name, error_info, messages]
+;
+; There are four different log writers that can be configured
+; to write log messages. The default writes to stderr of the
+; Erlang VM, which is useful for debugging/development as well
+; as for many container deployments.
+;
+; There's also a file writer that works with logrotate, a
+; rsyslog writer for deployments that need to have logs sent
+; over the network, and a journald writer that's more suitable
+; when using systemd journald.
+;
+;writer = stderr
+; Journald Writer notes:
+;
+; The journald writer doesn't have any options. It still writes
+; the logs to stderr, but without the timestamp prepended, since
+; the journal will add it automatically, and with the log level
+; formatted as per
+; https://www.freedesktop.org/software/systemd/man/sd-daemon.html
+;
+;
+; File Writer Options:
+;
+; The file writer will check every 30s to see if it needs
+; to reopen its file. This is useful for people who configure
+; logrotate to move log files periodically.
+;
+; file = ./couch.log ; Path name to write logs to
+;
+; Write operations will happen either every write_buffer bytes
+; or write_delay milliseconds. These are passed directly to the
+; Erlang file module with the write_delay option documented here:
+;
+; http://erlang.org/doc/man/file.html
+;
+; write_buffer = 0
+; write_delay = 0
+;
+;
+; Syslog Writer Options:
+;
+; The syslog writer options all correspond to their obvious
+; counterparts in rsyslog nomenclature.
+;
+; syslog_host =
+; syslog_port = 514
+; syslog_appid = couchdb
+; syslog_facility = local2
+
+[stats]
+; Stats collection interval in seconds. Default 10 seconds.
+;interval = 10
+
+[smoosh]
+;
+; More documentation on these is in the Automatic Compaction
+; section of the documentation.
+;
+;db_channels = upgrade_dbs,ratio_dbs,slack_dbs
+;view_channels = upgrade_views,ratio_views,slack_views
+;
+;[smoosh.ratio_dbs]
+;priority = ratio
+;min_priority = 2.0
+;
+;[smoosh.ratio_views]
+;priority = ratio
+;min_priority = 2.0
+;
+;[smoosh.slack_dbs]
+;priority = slack
+;min_priority = 536870912
+;
+;[smoosh.slack_views]
+;priority = slack
+;min_priority = 536870912
+
+[ioq]
+; The maximum number of concurrent in-flight IO requests that the queue allows.
+;concurrency = 10
+
+; The fraction of the time that a background IO request will be selected
+; over an interactive IO request when both queues are non-empty
+;ratio = 0.01
+
+[ioq.bypass]
+; System administrators can choose to submit specific classes of IO directly
+; to the underlying file descriptor or OS process, bypassing the queues
+; altogether. Installing a bypass can yield higher throughput and lower
+; latency, but relinquishes some control over prioritization. The following
+; classes are recognized with the following defaults:
+
+; Messages on their way to an external process (e.g., couchjs) are bypassed
+;os_process = true
+
+; Disk IO fulfilling interactive read requests is bypassed
+;read = true
+
+; Disk IO required to update a database is bypassed
+;write = true
+
+; Disk IO required to update views and other secondary indexes is bypassed
+;view_update = true
+
+; Disk IO issued by the background replication processes that fix any
+; inconsistencies between shard copies is queued
+;shard_sync = false
+
+; Disk IO issued by compaction jobs is queued
+;compaction = false
+
+[dreyfus]
+; The name and location of the Clouseau Java service required to
+; enable Search functionality.
+; name = clouseau@127.0.0.1
+
+; CouchDB will try to re-connect to Clouseau using a bounded
+; exponential backoff with the following number of iterations.
+; retry_limit = 5
+
+; The default number of results returned from a global search query.
+; limit = 25
+
+; The default number of results returned from a search on a partition
+; of a database.
+; limit_partitions = 2000
+
+; The maximum number of results that can be returned from a global
+; search query (or any search query on a database without user-defined
+; partitions). Attempts to set ?limit=N higher than this value will
+; be rejected.
+; max_limit = 200
+
+; The maximum number of results that can be returned when searching
+; a partition of a database. Attempts to set ?limit=N higher than this
+; value will be rejected. If this config setting is not defined,
+; CouchDB will use the value of `max_limit` instead. If neither is
+; defined, the default is 2000 as stated here.
+; max_limit_partitions = 2000
+
+[reshard]
+;max_jobs = 48
+;max_history = 20
+;max_retries = 1
+;retry_interval_sec = 10
+;delete_source = true
+;update_shard_map_timeout_sec = 60
+;source_close_timeout_sec = 600
+;require_node_param = false
+;require_range_param = false
+
+[prometheus]
+additional_port = false
+bind_address = 127.0.0.1
+port = 17986