; @configure_input@

; Upgrading CouchDB will overwrite this file.

[couchdb]
database_dir = ${CMAKE_INSTALL_PREFIX}/var/lib/couchdb
view_index_dir = ${CMAKE_INSTALL_PREFIX}/var/lib/couchdb
util_driver_dir = ${CMAKE_INSTALL_PREFIX}/${CMAKE_ERL_LIB_INSTALL_PREFIX}/couch-1.2.0a-961ad59-git/priv/lib
delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
uri_file = ${CMAKE_INSTALL_PREFIX}/var/run/couchdb/couch.uri
; Maximum number of distinct view update/building processes at any point in time.
max_parallel_indexers = 4
max_parallel_replica_indexers = 2
max_parallel_spatial_indexers = 4
consistency_check_precompacted = false
consistency_check_compacted = false
; Maximum period, in milliseconds, for which we attempt to retry file
; operations on Windows.
windows_file_op_retry_period = 5000

[database_compaction]
; larger buffer sizes can result in smaller files
doc_buffer_size = 524288 ; value in bytes
checkpoint_after = 5242880 ; checkpoint after every N bytes written

[httpd]
port = 5984
ip4_bind_address = 127.0.0.1
ip6_bind_address = ::1
authentication_handlers =
default_handler = {couch_httpd_db, handle_request}
allow_jsonp = false
db_frontend = couch_db_frontend
; Options for the MochiWeb HTTP server.
;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
; For more socket options, consult Erlang's module 'inet' man page.
;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]

[ssl]
port = 6984
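; A hypothetical example, following Apache CouchDB's [ssl] options; the
; option names and paths below are assumptions, so verify they apply to this
; build before uncommenting:
;cert_file = /full/path/to/server_cert.pem
;key_file = /full/path/to/server_key.pem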

[log]
file = ${CMAKE_INSTALL_PREFIX}/var/log/couchdb/couch.log
level = info
include_sasl = true

[daemons]
view_manager={couch_view, start_link, []}
set_view_manager={couch_set_view, start_link, [prod, mapreduce_view]}
set_view_manager_dev={couch_set_view, start_link, [dev, mapreduce_view]}
index_merger_pool={lhttpc_manager, start_link, [[{connection_timeout, 90000}, {pool_size, 10000}, {name, couch_index_merger_connection_pool}]]}
query_servers={couch_query_servers, start_link, []}
httpd={couch_httpd, start_link, []}
uuids={couch_uuids, start, []}
couch_set_view_ddoc_cache={couch_set_view_ddoc_cache, start_link, []}
replication_manager={couch_replication_manager, start_link, []}
compaction_daemon={couch_compaction_daemon, start_link, []}

[httpd_global_handlers]
/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
_view_merge = {couch_httpd_view_merger, handle_req}
_set_view = {couch_set_view_http, handle_req}

[httpd_db_handlers]
_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
_compact = {couch_httpd_db, handle_compact_req}
_design = {couch_httpd_db, handle_design_req}
_changes = {couch_httpd_db, handle_changes_req}

[httpd_design_handlers]
_view = {couch_httpd_view, handle_view_req}
_info = {couch_httpd_db, handle_design_info_req}

[uuids]
; Known algorithms:
;   random - 128 bits of random awesome
;     All awesome, all the time.
;   sequential - monotonically increasing ids with random increments
;     First 26 hex characters are random. Last 6 increment in
;     random amounts until an overflow occurs. On overflow, the
;     random prefix is regenerated and the process starts over.
;   utc_random - Time since Jan 1, 1970 UTC with microseconds
;     First 14 characters are the time in hex. Last 18 are random.
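; For illustration only (hypothetical values), two consecutive `sequential`
; ids could look like:
;   4e17c12bc59855b04b0ad25f14000001
;   4e17c12bc59855b04b0ad25f140004d7
; and a `utc_random` id like (14 hex time characters + 18 random):
;   04cfa405381205204f75100d0241ccc3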
algorithm = sequential

[replicator]
db = _replicator
; Maximum replication retry count can be a non-negative integer or "infinity".
max_replication_retry_count = 10
; More worker processes can give higher network throughput but also cause
; more disk and network IO.
worker_processes = 4
; With lower batch sizes, checkpoints are done more frequently. Lower batch
; sizes also reduce the total amount of RAM used.
worker_batch_size = 500
; Maximum number of HTTP connections per replication.
http_connections = 20
; HTTP connection timeout per replication.
; Even on very fast and reliable networks, it might need to be increased if
; the remote database is too busy.
connection_timeout = 30000
; If a request fails, the replicator will retry it up to N times.
retries_per_request = 2
; Some socket options that might boost performance in some scenarios:
;       {nodelay, boolean()}
;       {sndbuf, integer()}
;       {recbuf, integer()}
;       {priority, integer()}
; See the `inet` Erlang module's man page for the full list of options.
socket_options = [{keepalive, true}, {nodelay, false}]
; Set to true to validate peer certificates.
verify_ssl_certificates = false
; File containing a list of trusted peer certificates (PEM format).
; ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
; Maximum peer certificate depth (must be set even if certificate validation is off).
ssl_certificate_max_depth = 3
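; For example, to validate peers against a system CA bundle (illustrative
; path; adjust for your system), one might set:
;verify_ssl_certificates = true
;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt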

[compaction_daemon]
; The delay, in seconds, between each check to determine which databases and
; view indexes need to be compacted.
check_interval = 60
; If a database or view index file is smaller than this value (in bytes),
; compaction will not happen. Very small files always have very high
; fragmentation, so it's not worth compacting them.
min_file_size = 131072

[compactions]
; List of compaction rules for the compaction daemon.
; The daemon compacts databases and their respective view groups when all the
; condition parameters are satisfied. Configuration can be per database or
; global, and it has the following format:
;
; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
;
; Possible parameters:
;
; * db_fragmentation - If the ratio (as an integer percentage) of the amount
;                      of old data (and its supporting metadata) over the database
;                      file size is equal to or greater than this value, this
;                      database compaction condition is satisfied.
;                      This value is computed as:
;
;                           (file_size - data_size) / file_size * 100
;
;                      The data_size and file_size values can be obtained when
;                      querying a database's information URI (GET /dbname/).
;                      A worked example follows this parameter list.
;
; * view_fragmentation - If the ratio (as an integer percentage) of the amount
;                        of old data (and its supporting metadata) over the view
;                        index (view group) file size is equal to or greater than
;                        this value, then this view index compaction condition is
;                        satisfied. This value is computed as:
;
;                            (file_size - data_size) / file_size * 100
;
;                        The data_size and file_size values can be obtained when
;                        querying a view group's information URI
;                        (GET /dbname/_design/groupname/_info).
;
; * from _and_ to - The period during which compaction of a database (and its
;                   view groups) is allowed. The value for these parameters
;                   must obey the format:
;
;                   HH:MM - HH:MM  (HH in [0..23], MM in [0..59])
;
; * strict_window - If a compaction is still running after the end of the allowed
;                   period, it will be canceled if this parameter is set to 'true'.
;                   It defaults to 'false' and it's meaningful only if the *period*
;                   parameter is also specified.
;
; * parallel_view_compaction - If set to 'true', the database and its views are
;                              compacted in parallel. This is only useful on
;                              certain setups, for example when the database
;                              and view index directories point to different
;                              disks. It defaults to 'false'.
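;
; As a worked example (illustrative numbers): a database whose information
; URI reports file_size = 1000000 and data_size = 300000 has a fragmentation
; of (1000000 - 300000) / 1000000 * 100 = 70%, so it satisfies a
; {db_fragmentation, "70%"} condition.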
;
; Before a compaction is triggered, an estimation of how much free disk space is
; needed is computed. This estimation corresponds to 2 times the data size of
; the database or view index. When there's not enough free disk space to compact
; a particular database or view index, a warning message is logged.
;
; Examples:
;
; 1) foo = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
;    The `foo` database is compacted if its fragmentation is 70% or more.
;    Any view index of this database is compacted only if its fragmentation
;    is 60% or more.
;
; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}]
;    Similar to the preceding example but a compaction (database or view index)
;    is only triggered if the current time is between midnight and 4 AM.
;
; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}]
;    Similar to the preceding example - a compaction (database or view index)
;    is only triggered if the current time is between midnight and 4 AM. If at
;    4 AM the database or one of its views is still compacting, the compaction
;    process will be canceled.
;
; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}]
;    Similar to the preceding example, but a database and its views can be
;    compacted in parallel.
;
;_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}]
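;
; A per-database rule (the database name `mydb` below is hypothetical) that
; overrides the default rule could look like:
;mydb = [{db_fragmentation, "80%"}, {view_fragmentation, "70%"}, {parallel_view_compaction, true}]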
;
;[vendor]
;name = Couchbase Single Server
;version = 2.0.0
;url = http://www.couchbase.com/

[mapreduce]
; Maximum duration, in milliseconds, for the execution time of all the map/reduce
; functions in a design document against a single document (map function) or against
; a list of map values/reductions (reduce/rereduce function).
function_timeout = 10000
; The maximum byte size allowed to be emitted for a single document. This is the
; sum of the sizes of all emitted keys and values. A value of 0 means no limit.
max_kv_size_per_doc = 1048576
; Do not fetch the document body when its fields are not used in any of the
; map functions defined in a given design document.
; This optimizes view indexing time.
optimize_doc_loading = true

[set_views]
ddoc_cache_size = 1048576
btree_kv_node_threshold = 7168
btree_kp_node_threshold = 6144
; Maximum batch size (in bytes) for incremental index updates.
indexer_max_insert_batch_size = 1048576
; Maximum size (in bytes) of documents sent to the JavaScript engine.
; A value of 0 means no limit; any document is passed to the engine.
indexer_max_doc_size = 20971520
; Sleep period, in milliseconds, that index updates wait while the compactor
; is in its retry phase.
throttle_period = 0

[spatial_views]
vtree_kv_node_threshold = 2000
vtree_kp_node_threshold = 2000
vtree_min_fill_rate = 0.4

[dcp]
port = 11209
connection_timeout = 5000
flow_control_buffer_size = 20971520