; @configure_input@

; Upgrading CouchDB will overwrite this file.

[couchdb]
database_dir = @abs_top_builddir@/tmp/lib
view_index_dir = @abs_top_builddir@/tmp/lib
util_driver_dir = @abs_top_builddir@/src/couchdb/priv
delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
uri_file = @abs_top_builddir@/tmp/run/couch.uri
; Maximum number of distinct view update/building processes at any point in time.
max_parallel_indexers = 4
max_parallel_replica_indexers = 2
max_parallel_spatial_indexers = 4
consistency_check_precompacted = false
consistency_check_compacted = false
; Maximum period for which we attempt to retry file operations on Windows.
windows_file_op_retry_period = 5000

[database_compaction]
; larger buffer sizes can result in smaller compacted files
doc_buffer_size = 524288 ; value in bytes
checkpoint_after = 5242880 ; checkpoint after every N bytes written
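; For reference, these defaults correspond to round binary sizes:
; 524288 bytes = 512 KiB and 5242880 bytes = 5 MiB.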

[httpd]
port = 5984
bind_address = 127.0.0.1
authentication_handlers =
; authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
default_handler = {couch_httpd_db, handle_request}
allow_jsonp = false
db_frontend = couch_db_frontend
; Options for the MochiWeb HTTP server.
;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
; For more socket options, consult the man page of Erlang's 'inet' module.
;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]

[ssl]
port = 6984

[log]
file = @abs_top_builddir@/tmp/log/couch.log
level = info
include_sasl = true

[couch_httpd_auth]
authentication_db = _users
authentication_redirect = /_utils/session.html
require_valid_user = false
timeout = 600 ; number of seconds before automatic logout
auth_cache_size = 50 ; size is number of cache entries

[daemons]
view_manager={couch_view, start_link, []}
set_view_manager={couch_set_view, start_link, [prod, mapreduce_view]}
set_view_manager_dev={couch_set_view, start_link, [dev, mapreduce_view]}
index_merger_pool={lhttpc_manager, start_link, [[{connection_timeout, 90000}, {pool_size, 10000}, {name, couch_index_merger_connection_pool}]]}
query_servers={couch_query_servers, start_link, []}
httpd={couch_httpd, start_link, []}
uuids={couch_uuids, start, []}
auth_cache={couch_auth_cache, start_link, []}
couch_set_view_ddoc_cache={couch_set_view_ddoc_cache, start_link, []}
replication_manager={couch_replication_manager, start_link, []}
compaction_daemon={couch_compaction_daemon, start_link, []}

[httpd_global_handlers]
/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
_view_merge = {couch_httpd_view_merger, handle_req}
_set_view = {couch_set_view_http, handle_req}

[httpd_db_handlers]
_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
_compact = {couch_httpd_db, handle_compact_req}
_design = {couch_httpd_db, handle_design_req}
_changes = {couch_httpd_db, handle_changes_req}

[httpd_design_handlers]
_view = {couch_httpd_view, handle_view_req}
_info = {couch_httpd_db, handle_design_info_req}

[uuids]
; Known algorithms:
;   random - 128 bits of random awesome
;     All awesome, all the time.
;   sequential - monotonically increasing ids with random increments
;     First 26 hex characters are random. Last 6 increment in
;     random amounts until an overflow occurs. On overflow, the
;     random prefix is regenerated and the process starts over.
;   utc_random - Time since Jan 1, 1970 UTC with microseconds
;     First 14 characters are the time in hex. Last 18 are random.
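; Illustrative (hypothetical) example ids:
;   sequential: 1f2e3d4c5b6a79880f1e2d3c4b000001
;   utc_random: 0003c5fa8b21d47a6b5c4d3e2f1a0b9c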
algorithm = sequential

[replicator]
db = _replicator
; The maximum replication retry count can be a non-negative integer or "infinity".
max_replication_retry_count = 10
; More worker processes can give higher network throughput, but may also cause
; more disk and network IO.
worker_processes = 4
; With lower batch sizes, checkpoints are done more frequently. Lower batch
; sizes also reduce the total amount of RAM used.
worker_batch_size = 500
; Maximum number of HTTP connections per replication.
http_connections = 20
; HTTP connection timeout per replication.
; Even for very fast/reliable networks it might need to be increased if a remote
; database is too busy.
connection_timeout = 30000
; If a request fails, the replicator will retry it up to N times.
retries_per_request = 2
; Some socket options that might boost performance in some scenarios:
;       {nodelay, boolean()}
;       {sndbuf, integer()}
;       {recbuf, integer()}
;       {priority, integer()}
; See the `inet` Erlang module's man page for the full list of options.
socket_options = [{keepalive, true}, {nodelay, false}]
; set to true to validate peer certificates
verify_ssl_certificates = false
; file containing a list of trusted peer certificates (PEM format)
; ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
; maximum peer certificate depth (must be set even if certificate validation is off)
ssl_certificate_max_depth = 3
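; For example, to validate peer certificates against a system CA bundle, one
; might set (hypothetical path; it varies by system):
;   verify_ssl_certificates = true
;   ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt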

[compaction_daemon]
; The delay, in seconds, between each check to determine which databases and
; view indexes need to be compacted.
check_interval = 60
; If a database or view index file is smaller than this value (in bytes),
; compaction will not happen. Very small files always have very high
; fragmentation, so they are not worth compacting.
min_file_size = 131072

[compactions]
; List of compaction rules for the compaction daemon.
; The daemon compacts databases and their respective view groups when all the
; condition parameters are satisfied. Configuration can be per database or
; global, and it has the following format:
;
; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
;
; Possible parameters:
;
; * db_fragmentation - If the ratio (as an integer percentage) of the amount
;                      of old data (and its supporting metadata) over the database
;                      file size is equal to or greater than this value, this
;                      database compaction condition is satisfied.
;                      This value is computed as:
;
;                           (file_size - data_size) / file_size * 100
;
;                      The data_size and file_size values can be obtained when
;                      querying a database's information URI (GET /dbname/).
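;                      Worked example (illustrative numbers): with
;                      file_size = 1000000 and data_size = 300000, the
;                      fragmentation is (1000000 - 300000) / 1000000 * 100 = 70%.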
;
; * view_fragmentation - If the ratio (as an integer percentage) of the amount
;                        of old data (and its supporting metadata) over the view
;                        index (view group) file size is equal to or greater than
;                        this value, then this view index compaction condition is
;                        satisfied. This value is computed as:
;
;                            (file_size - data_size) / file_size * 100
;
;                        The data_size and file_size values can be obtained when
;                        querying a view group's information URI
;                        (GET /dbname/_design/groupname/_info).
;
; * from _and_ to - The period for which a database (and its view groups) compaction
;                   is allowed. The value for these parameters must obey the format:
;
;                   HH:MM - HH:MM  (HH in [0..23], MM in [0..59])
;
; * strict_window - If a compaction is still running after the end of the allowed
;                   period, it will be canceled if this parameter is set to 'true'.
;                   It defaults to 'false' and is meaningful only if the from/to
;                   period is also specified.
;
; * parallel_view_compaction - If set to 'true', the database and its views are
;                              compacted in parallel. This is only useful on
;                              certain setups, for example when the database
;                              and view index directories point to different
;                              disks. It defaults to 'false'.
;
; Before a compaction is triggered, an estimation of how much free disk space is
; needed is computed. This estimation corresponds to 2 times the data size of
; the database or view index. When there's not enough free disk space to compact
; a particular database or view index, a warning message is logged.
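; For example (illustrative numbers), a database with a data_size of 1 GiB is
; estimated to need 2 GiB of free disk space for its compaction to proceed.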
;
; Examples:
;
; 1) foo = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
;    The `foo` database is compacted if its fragmentation is 70% or more.
;    Any view index of this database is compacted only if its fragmentation
;    is 60% or more.
;
; 2) foo = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}]
;    Similar to the preceding example, but a compaction (database or view index)
;    is only triggered if the current time is between midnight and 4 AM.
;
; 3) foo = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}]
;    Similar to the preceding example - a compaction (database or view index)
;    is only triggered if the current time is between midnight and 4 AM. If at
;    4 AM the database or one of its views is still compacting, the compaction
;    process will be canceled.
;
; 4) foo = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}]
;    Similar to the preceding example, but a database and its views can be
;    compacted in parallel.
;
;_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}]
;
;[vendor]
;name = Couchbase Single Server
;version = 2.0.0
;url = http://www.couchbase.com/

[mapreduce]
; Maximum execution time, in milliseconds, for all the map/reduce functions in a
; design document against a single document (map functions) or against a list of
; map values/reductions (reduce/rereduce functions).
function_timeout = 10000
; The maximum byte size allowed to be emitted for a single document. This is the
; sum of the sizes of all emitted keys and values. A maximum of 0 means no limit.
max_kv_size_per_doc = 1048576
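; For example (illustrative), with the default of 1048576 (1 MiB), a JavaScript
; map function such as
;   function (doc) { emit(doc._id, doc); }
; may emit at most 1 MiB of combined key and value data for a single document.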

[set_views]
ddoc_cache_size = 1048576
btree_kv_node_threshold = 7168
btree_kp_node_threshold = 6144
; For incremental updates (value in bytes).
indexer_max_insert_batch_size = 1048576
; Maximum size (in bytes) of documents sent to the JavaScript engine.
; A value of 0 means no limit; any document is passed to the engine.
indexer_max_doc_size = 1048576
; Sleep period for the updater to wait while the compactor is in its retry phase.
; Value is in milliseconds.
throttle_period = 0

[spatial_views]
vtree_kv_node_threshold = 2000
vtree_kp_node_threshold = 2000
vtree_min_fill_rate = 0.4

[dcp]
port = 11209
connection_timeout = 5000
flow_control_buffer_size = 20971520
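; For reference, 20971520 bytes = 20 MiB.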