akka-cluster
######################################
# Akka Cluster Reference Config File #
######################################

# This is the reference config file that contains all the default settings.
# Make your edits/overrides in your application.conf.

akka {

  cluster {
    # Initial contact points of the cluster.
    # The nodes to join automatically at startup.
    # Comma separated full URIs defined by a string of the form
    # "akka.tcp://system@hostname:port"
    # Leave as empty if the node is supposed to be joined manually.
    seed-nodes = []
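    # Example (a sketch, with hypothetical system/host names): a two-node
    # seed list could be given in application.conf as
    #   seed-nodes = [
    #     "akka.tcp://ClusterSystem@host1:2552",
    #     "akka.tcp://ClusterSystem@host2:2552"]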

    # how long to wait for one of the seed nodes to reply to initial join request
    seed-node-timeout = 5s

    # If a join request fails it will be retried after this period.
    # Disable join retry by specifying "off".
    retry-unsuccessful-join-after = 10s

    # Should the 'leader' in the cluster be allowed to automatically mark
    # unreachable nodes as DOWN after a configured time of unreachability?
    # Using auto-down implies that two separate clusters will automatically be
    # formed in case of network partition.
    #
    # Don't enable this in production, see 'Auto-downing (DO NOT USE)' section
    # of Akka Cluster documentation.
    #
    # Disable with "off" or specify a duration to enable auto-down.
    # If a downing-provider-class is configured this setting is ignored.
    auto-down-unreachable-after = off

    # Time margin after which shards or singletons that belonged to a downed/removed
    # partition are created in surviving partition. The purpose of this margin is that
    # in case of a network partition the persistent actors in the non-surviving partitions
    # must be stopped before corresponding persistent actors are started somewhere else.
    # This is useful if you implement downing strategies that handle network partitions,
    # e.g. by keeping the larger side of the partition and shutting down the smaller side.
    # It will not add any extra safety for auto-down-unreachable-after, since that is not
    # handling network partitions.
    # Disable with "off" or specify a duration to enable.
    down-removal-margin = off

    # Pluggable support for downing of nodes in the cluster.
    # If this setting is left empty, behaviour will depend on 'auto-down-unreachable-after'
    # in the following ways:
    # * if it is 'off' the `NoDowning` provider is used and no automatic downing will be performed
    # * if it is set to a duration the `AutoDowning` provider is used with the configured downing duration
    #
    # If specified, the value must be the fully qualified class name of a subclass of
    # `akka.cluster.DowningProvider` having a public one argument constructor accepting an `ActorSystem`
    downing-provider-class = ""
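    # Example (a sketch, with a hypothetical class name): a custom downing
    # strategy, e.g. one that downs the minority side of a partition, could
    # be plugged in as
    #   downing-provider-class = "com.example.MajorityDowningProvider"
    # provided the class extends akka.cluster.DowningProvider and has a
    # public one argument constructor accepting an ActorSystem.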

    # Artery only setting
    # When a node has been gracefully removed, let this time pass (to allow for example
    # cluster singleton handover to complete) and then quarantine the removed node.
    quarantine-removed-node-after = 30s

    # By default, the leader will not move 'Joining' members to 'Up' during a network
    # split. This feature allows the leader to accept 'Joining' members to be 'WeaklyUp'
    # so they become part of the cluster even during a network split. The leader will
    # move 'WeaklyUp' members to 'Up' status once convergence has been reached. This
    # feature must be off if some members are running Akka 2.3.X.
    # WeaklyUp is an EXPERIMENTAL feature.
    allow-weakly-up-members = off

    # The roles of this member. List of strings, e.g. roles = ["A", "B"].
    # The roles are part of the membership information and can be used by
    # routers or other services to distribute work to certain member types,
    # e.g. front-end and back-end nodes.
    roles = []
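    # Example (a sketch, with a hypothetical role name): a node serving only
    # front-end traffic
    #   roles = ["frontend"]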

    role {
      # Minimum required number of members of a certain role before the leader
      # changes member status of 'Joining' members to 'Up'. Typically used together
      # with 'Cluster.registerOnMemberUp' to defer some action, such as starting
      # actors, until the cluster has reached a certain size.
      # E.g. to require 2 nodes with role 'frontend' and 3 nodes with role 'backend':
      #   frontend.min-nr-of-members = 2
      #   backend.min-nr-of-members = 3
      #<role-name>.min-nr-of-members = 1
    }

    # Minimum required number of members before the leader changes member status
    # of 'Joining' members to 'Up'. Typically used together with
    # 'Cluster.registerOnMemberUp' to defer some action, such as starting actors,
    # until the cluster has reached a certain size.
    min-nr-of-members = 1
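    # Example (a sketch, with a hypothetical value): requiring 3 members
    # before 'Joining' members are moved to 'Up'
    #   min-nr-of-members = 3
    # On the application side, 'Cluster(system).registerOnMemberUp' can then
    # be used to start actors only once this node has become 'Up'.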

    # Enable/disable info level logging of cluster events
    log-info = on

    # Enable or disable JMX MBeans for management of the cluster
    jmx.enabled = on

    # how long should the node wait before starting the periodic maintenance tasks?
    periodic-tasks-initial-delay = 1s

    # how often should the node send out gossip information?
    gossip-interval = 1s

    # discard incoming gossip messages if not handled within this duration
    gossip-time-to-live = 2s

    # how often should the leader perform maintenance tasks?
    leader-actions-interval = 1s

    # how often should the node move nodes, marked as unreachable by the failure
    # detector, out of the membership ring?
    unreachable-nodes-reaper-interval = 1s

    # How often the current internal stats should be published.
    # A value of 0s can be used to publish the stats each time they are updated.
    # Disable with "off".
    publish-stats-interval = off

    # The id of the dispatcher to use for cluster actors. If not specified,
    # the default dispatcher is used.
    # If specified you need to define the settings of the actual dispatcher.
    use-dispatcher = ""
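    # Example (a sketch, with a hypothetical dispatcher name): running the
    # cluster actors on a dedicated dispatcher, whose settings must then be
    # defined as well
    #   use-dispatcher = "akka.cluster.cluster-dispatcher"
    #   akka.cluster.cluster-dispatcher {
    #     type = Dispatcher
    #     executor = "fork-join-executor"
    #     fork-join-executor {
    #       parallelism-min = 2
    #       parallelism-max = 4
    #     }
    #   }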

    # With this probability, gossip to a random node with newer or older state
    # information, if any; otherwise gossip to any random live node.
    # Probability value is between 0.0 and 1.0. 0.0 means never, 1.0 means always.
    gossip-different-view-probability = 0.8

    # Reduce the above probability when the number of nodes in the cluster
    # is greater than this value.
    reduce-gossip-different-view-probability = 400

    # Settings for the Phi accrual failure detector (http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf
    # [Hayashibara et al]) used by the cluster subsystem to detect unreachable
    # members.
    # The default PhiAccrualFailureDetector will trigger if there are no heartbeats within
    # the duration heartbeat-interval + acceptable-heartbeat-pause + threshold_adjustment,
    # i.e. around 5.5 seconds with default settings.
    failure-detector {

      # FQCN of the failure detector implementation.
      # It must implement akka.remote.FailureDetector and have
      # a public constructor with a com.typesafe.config.Config and
      # akka.actor.EventStream parameter.
      implementation-class = "akka.remote.PhiAccrualFailureDetector"

      # How often keep-alive heartbeat messages should be sent to each connection.
      heartbeat-interval = 1 s

      # Defines the failure detector threshold.
      # A low threshold is prone to generate many wrong suspicions but ensures
      # a quick detection in the event of a real crash. Conversely, a high
      # threshold generates fewer mistakes but needs more time to detect
      # actual crashes.
      threshold = 8.0

      # Number of samples of inter-heartbeat arrival times to adaptively
      # calculate the failure timeout for connections.
      max-sample-size = 1000

      # Minimum standard deviation to use for the normal distribution in
      # AccrualFailureDetector. Too low a standard deviation might result in
      # too much sensitivity to sudden, but normal, deviations in heartbeat
      # inter-arrival times.
      min-std-deviation = 100 ms

      # Number of potentially lost/delayed heartbeats that will be
      # accepted before considering it to be an anomaly.
      # This margin is important to be able to survive sudden, occasional,
      # pauses in heartbeat arrivals, due to, for example, garbage collection
      # pauses or network drops.
      acceptable-heartbeat-pause = 3 s
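      # Worked example (approximate): with the defaults above, a crashed node
      # is suspected after roughly
      #   heartbeat-interval + acceptable-heartbeat-pause + threshold margin
      #   = 1s + 3s + ~1.5s, i.e. the "around 5.5 seconds" noted above.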

      # Number of member nodes that each member will send heartbeat messages to,
      # i.e. each node will be monitored by this number of other nodes.
      monitored-by-nr-of-members = 5

      # After the heartbeat request has been sent the first failure detection
      # will start after this period, even though no heartbeat message has
      # been received.
      expected-response-after = 1 s

    }

    metrics {
      # Enable or disable metrics collector for load-balancing nodes.
      enabled = on

      # FQCN of the metrics collector implementation.
      # It must implement akka.cluster.MetricsCollector and
      # have a public constructor with an akka.actor.ActorSystem parameter.
      # The default SigarMetricsCollector uses JMX and Hyperic SIGAR, if SIGAR
      # is on the classpath, otherwise only JMX.
      collector-class = "akka.cluster.SigarMetricsCollector"

      # How often metrics are sampled on a node.
      # Shorter interval will collect the metrics more often.
      collect-interval = 3s

      # How often a node publishes metrics information.
      gossip-interval = 3s

      # How quickly the exponential weighting of past data is decayed compared to
      # new data. Set lower to increase the bias toward newer values.
      # The relevance of each data sample is halved for every passing half-life
      # duration, i.e. after 4 times the half-life, a data sample's relevance is
      # reduced to 6% of its original relevance. The initial relevance of a data
      # sample is given by 1 - 0.5 ^ (collect-interval / half-life).
      # See http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
      moving-average-half-life = 12s
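      # Worked example: with collect-interval = 3s and half-life = 12s, each
      # new sample enters with weight 1 - 0.5 ^ (3 / 12) = ~0.16 and is then
      # halved every 12s, so after 48s (4 half-lives) its relevance has
      # decayed to 0.5 ^ 4 = ~6%, as noted above.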
    }

    # If the tick-duration of the default scheduler is longer than the
    # tick-duration configured here, a dedicated scheduler will be used for
    # periodic tasks of the cluster, otherwise the default scheduler is used.
    # See akka.scheduler settings for more details.
    scheduler {
      tick-duration = 33ms
      ticks-per-wheel = 512
    }

    debug {
      # log heartbeat events (very verbose, useful mostly when debugging heartbeating issues)
      verbose-heartbeat-logging = off
    }

  }

  # Default configuration for routers
  actor.deployment.default {
    # MetricsSelector to use
    # - available: "mix", "heap", "cpu", "load"
    # - or: Fully qualified class name of the MetricsSelector class.
    #       The class must extend akka.cluster.routing.MetricsSelector
    #       and have a public constructor with com.typesafe.config.Config
    #       parameter.
    # - default is "mix"
    metrics-selector = mix
  }
  actor.deployment.default.cluster {
    # enable cluster aware router that deploys to nodes in the cluster
    enabled = off

    # Maximum number of routees that will be deployed on each cluster
    # member node.
    # Note that max-total-nr-of-instances defines total number of routees, but
    # number of routees per node will not be exceeded, i.e. if you
    # define max-total-nr-of-instances = 50 and max-nr-of-instances-per-node = 2
    # it will deploy 2 routees per new member in the cluster, up to
    # 25 members.
    max-nr-of-instances-per-node = 1

    # Maximum number of routees that will be deployed, in total
    # on all nodes. See also description of max-nr-of-instances-per-node.
    # For backwards compatibility reasons, nr-of-instances
    # has the same purpose as max-total-nr-of-instances for cluster
    # aware routers and nr-of-instances (if defined by user) takes
    # precedence over max-total-nr-of-instances.
    max-total-nr-of-instances = 10000

    # Defines if routees are allowed to be located on the same node as
    # the head router actor, or only on remote nodes.
    # Useful for master-worker scenario where all routees are remote.
    allow-local-routees = on

    # Use members with specified role, or all members if undefined or empty.
    use-role = ""

  }
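  # Example (a sketch, with a hypothetical router path): a cluster-aware
  # adaptive pool, balancing on heap usage across 'backend' nodes, could be
  # deployed as
  #   actor.deployment {
  #     /workerRouter {
  #       router = adaptive-pool
  #       metrics-selector = heap
  #       cluster {
  #         enabled = on
  #         max-nr-of-instances-per-node = 3
  #         allow-local-routees = off
  #         use-role = "backend"
  #       }
  #     }
  #   }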

  # Protobuf serializer for cluster messages
  actor {
    serializers {
      akka-cluster = "akka.cluster.protobuf.ClusterMessageSerializer"
    }

    serialization-bindings {
      "akka.cluster.ClusterMessage" = akka-cluster
    }

    serialization-identifiers {
      "akka.cluster.protobuf.ClusterMessageSerializer" = 5
    }

    router.type-mapping {
      adaptive-pool = "akka.cluster.routing.AdaptiveLoadBalancingPool"
      adaptive-group = "akka.cluster.routing.AdaptiveLoadBalancingGroup"
    }
  }

}
akka-multi-node-testkit
#############################################
# Akka Remote Testing Reference Config File #
#############################################

# This is the reference config file that contains all the default settings.
# Make your edits/overrides in your application.conf.

akka {
  testconductor {

    # Timeout for joining a barrier: this is the maximum time any participant
    # waits for everybody else to join a named barrier.
    barrier-timeout = 30s

    # Timeout for interrogation of TestConductor's Controller actor
    query-timeout = 10s

    # Threshold for packet size, expressed as a time unit, above which the
    # failure injector will split the packet and deliver it in smaller
    # portions; do not give a value smaller than the HashedWheelTimer
    # resolution (it would not make sense)
    packet-split-threshold = 100ms

    # amount of time for the ClientFSM to wait for the connection to the conductor
    # to be successful
    connect-timeout = 20s

    # Number of connect attempts to be made to the conductor controller
    client-reconnects = 30

    # minimum time interval which is to be inserted between reconnect attempts
    reconnect-backoff = 1s

    netty {
      # (I&O) Used to configure the number of I/O worker threads on server sockets
      server-socket-worker-pool {
        # Min number of threads to cap factor-based number to
        pool-size-min = 1

        # The pool size factor is used to determine thread pool size
        # using the following formula: ceil(available processors * factor).
        # Resulting size is then bounded by the pool-size-min and
        # pool-size-max values.
        pool-size-factor = 1.0

        # Max number of threads to cap factor-based number to
        pool-size-max = 2
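        # Worked example: on an 8-core machine ceil(8 * 1.0) = 8 threads,
        # which the bounds above then cap to pool-size-max = 2.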
      }

      # (I&O) Used to configure the number of I/O worker threads on client sockets
      client-socket-worker-pool {
        # Min number of threads to cap factor-based number to
        pool-size-min = 1

        # The pool size factor is used to determine thread pool size
        # using the following formula: ceil(available processors * factor).
        # Resulting size is then bounded by the pool-size-min and
        # pool-size-max values.
        pool-size-factor = 1.0

        # Max number of threads to cap factor-based number to
        pool-size-max = 2
      }
    }
  }
}
akka-persistence
###########################################################
# Akka Persistence Extension Reference Configuration File #
###########################################################

# This is the reference config file that contains all the default settings.
# Make your edits in your application.conf in order to override these settings.

# Directory of persistence journal and snapshot store plugins is available at the
# Akka Community Projects page http://akka.io/community/

# Default persistence extension settings.
akka.persistence {
  # Fully qualified class name providing a default internal stash overflow strategy.
  # It needs to be a subclass of akka.persistence.StashOverflowStrategyConfigurator.
  # The default strategy throws StashOverflowException.
  internal-stash-overflow-strategy = "akka.persistence.ThrowExceptionConfigurator"
  journal {
    # Absolute path to the journal plugin configuration entry used by
    # persistent actor or view by default.
    # Persistent actor or view can override `journalPluginId` method
    # in order to rely on a different journal plugin.
    plugin = ""
    # List of journal plugins to start automatically. Use "" for the default journal plugin.
    auto-start-journals = []
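    # Example (a sketch): selecting the bundled LevelDB journal and starting
    # it eagerly on ActorSystem startup
    #   plugin = "akka.persistence.journal.leveldb"
    #   auto-start-journals = ["akka.persistence.journal.leveldb"]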
  }
  snapshot-store {
    # Absolute path to the snapshot plugin configuration entry used by
    # persistent actor or view by default.
    # Persistent actor or view can override `snapshotPluginId` method
    # in order to rely on a different snapshot plugin.
    # It is not mandatory to specify a snapshot store plugin.
    # If you don't use snapshots you don't have to configure it.
    # Note that Cluster Sharding is using snapshots, so if you
    # use Cluster Sharding you need to define a snapshot store plugin.
    plugin = ""
    # List of snapshot stores to start automatically. Use "" for the default snapshot store.
    auto-start-snapshot-stores = []
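    # Example (a sketch): selecting the bundled local file system snapshot store
    #   plugin = "akka.persistence.snapshot-store.local"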
  }
  # used as default snapshot-store if no plugin is configured
  # (see `akka.persistence.snapshot-store`)
  no-snapshot-store {
    class = "akka.persistence.snapshot.NoSnapshotStore"
  }
  # Default persistent view settings.
  view {
    # Automated incremental view update.
    auto-update = on
    # Interval between incremental updates.
    auto-update-interval = 5s
    # Maximum number of messages to replay per incremental view update.
    # Set to -1 for no upper limit.
    auto-update-replay-max = -1
  }
  # Default reliable delivery settings.
  at-least-once-delivery {
    # Interval between re-delivery attempts.
    redeliver-interval = 5s
    # Maximum number of unconfirmed messages that will be sent in one
    # re-delivery burst.
    redelivery-burst-limit = 10000
    # After this number of delivery attempts a
    # `ReliableRedelivery.UnconfirmedWarning` message will be sent to the actor.
    warn-after-number-of-unconfirmed-attempts = 5
    # Maximum number of unconfirmed messages that an actor with
    # AtLeastOnceDelivery is allowed to hold in memory.
    max-unconfirmed-messages = 100000
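    # Worked example: with the defaults above, an unconfirmed delivery is
    # retried every 5s, so a destination that never confirms triggers the
    # UnconfirmedWarning after roughly 5 * 5s = 25s.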
  }
  # Default persistent extension thread pools.
  dispatchers {
    # Dispatcher used by every plugin which does not declare explicit
    # `plugin-dispatcher` field.
    default-plugin-dispatcher {
      type = PinnedDispatcher
      executor = "thread-pool-executor"
    }
    # Default dispatcher for message replay.
    default-replay-dispatcher {
      type = Dispatcher
      executor = "fork-join-executor"
      fork-join-executor {
        parallelism-min = 2
        parallelism-max = 8
      }
    }
    # Default dispatcher for streaming snapshot IO
    default-stream-dispatcher {
      type = Dispatcher
      executor = "fork-join-executor"
      fork-join-executor {
        parallelism-min = 2
        parallelism-max = 8
      }
    }
  }

  # Fallback settings for journal plugin configurations.
  # These settings are used if they are not defined in plugin config section.
  journal-plugin-fallback {

    # Fully qualified class name providing journal plugin api implementation.
    # It is mandatory to specify this property.
    # The class must have a constructor without parameters or constructor with
    # one `com.typesafe.config.Config` parameter.
    class = ""

    # Dispatcher for the plugin actor.
    plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"

    # Dispatcher for message replay.
    replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher"

    # Removed: used to be the maximum size of a persistent message batch written to the journal.
    # Now this setting is without function, PersistentActor will write as many messages
    # as it has accumulated since the last write.
    max-message-batch-size = 200

    # If there is more time between individual events during journal recovery
    # than this, the recovery will fail.
    # Note that it also affects reading the snapshot before replaying events on
    # top of it, even though it is configured for the journal.
    recovery-event-timeout = 30s

    circuit-breaker {
      max-failures = 10
      call-timeout = 10s
      reset-timeout = 30s
    }

    # The replay filter can detect a corrupt event stream by inspecting
    # sequence numbers and writerUuid when replaying events.
    replay-filter {
      # What the filter should do when detecting invalid events.
      # Supported values:
      # `repair-by-discard-old` : discard events from old writers,
      #                           warning is logged
      # `fail` : fail the replay, error is logged
      # `warn` : log warning but emit events untouched
      # `off` : disable this feature completely
      mode = repair-by-discard-old

      # It uses a look-ahead buffer for analyzing the events.
      # This defines the size (in number of events) of the buffer.
      window-size = 100

      # How many old writerUuids to remember
      max-old-writers = 10

      # Set this to `on` to enable detailed debug logging of each
      # replayed event.
      debug = off
    }
  }
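  # Example (a sketch, with hypothetical names): a custom journal plugin is
  # declared as its own config section, inheriting anything it leaves out
  # from journal-plugin-fallback above, and is selected via the plugin path:
  #   akka.persistence.journal.plugin = "my-app.custom-journal"
  #   my-app.custom-journal {
  #     class = "com.example.CustomJournal"
  #   }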

  # Fallback settings for snapshot store plugin configurations
  # These settings are used if they are not defined in plugin config section.
  snapshot-store-plugin-fallback {

    # Fully qualified class name providing snapshot store plugin api
    # implementation. It is mandatory to specify this property if
    # snapshot store is enabled.
    # The class must have a constructor without parameters or constructor with
    # one `com.typesafe.config.Config` parameter.
    class = ""

    # Dispatcher for the plugin actor.
    plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"

    circuit-breaker {
      max-failures = 5
      call-timeout = 20s
      reset-timeout = 60s
    }
  }
}

# Protobuf serialization for the persistent extension messages.
akka.actor {
  serializers {
    akka-persistence-message = "akka.persistence.serialization.MessageSerializer"
    akka-persistence-snapshot = "akka.persistence.serialization.SnapshotSerializer"
  }
  serialization-bindings {
    "akka.persistence.serialization.Message" = akka-persistence-message
    "akka.persistence.serialization.Snapshot" = akka-persistence-snapshot
  }
  serialization-identifiers {
    "akka.persistence.serialization.MessageSerializer" = 7
    "akka.persistence.serialization.SnapshotSerializer" = 8
  }
}


###################################################
# Persistence plugins included with the extension #
###################################################

# In-memory journal plugin.
akka.persistence.journal.inmem {
  # Class name of the plugin.
  class = "akka.persistence.journal.inmem.InmemJournal"
  # Dispatcher for the plugin actor.
  plugin-dispatcher = "akka.actor.default-dispatcher"
}

# Local file system snapshot store plugin.
akka.persistence.snapshot-store.local {
  # Class name of the plugin.
  class = "akka.persistence.snapshot.local.LocalSnapshotStore"
  # Dispatcher for the plugin actor.
  plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
  # Dispatcher for streaming snapshot IO.
  stream-dispatcher = "akka.persistence.dispatchers.default-stream-dispatcher"
  # Storage location of snapshot files.
  dir = "snapshots"
  # Number of load attempts when recovery from the latest snapshot fails
  # yet older snapshot files are available. Each recovery attempt will try
  # to recover using a snapshot file older than the one that previously
  # failed (if any are present). If all attempts fail the recovery will
  # fail and the persistent actor will be stopped.
  max-load-attempts = 3
}

# LevelDB journal plugin.
# Note: this plugin requires explicit LevelDB dependency, see below.
akka.persistence.journal.leveldb {
  # Class name of the plugin.
  class = "akka.persistence.journal.leveldb.LeveldbJournal"
  # Dispatcher for the plugin actor.
  plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
  # Dispatcher for message replay.
  replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher"
  # Storage location of LevelDB files.
  dir = "journal"
  # Use fsync on write.
  fsync = on
  # Verify checksum on read.
  checksum = off
  # Native LevelDB (via JNI) or LevelDB Java port.
  native = on
}

# Shared LevelDB journal plugin (for testing only).
# Note: this plugin requires explicit LevelDB dependency, see below.
akka.persistence.journal.leveldb-shared {
  # Class name of the plugin.
  class = "akka.persistence.journal.leveldb.SharedLeveldbJournal"
  # Dispatcher for the plugin actor.
  plugin-dispatcher = "akka.actor.default-dispatcher"
  # Timeout for async journal operations.
  timeout = 10s
  store {
    # Dispatcher for shared store actor.
    store-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
    # Dispatcher for message replay.
    replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher"
    # Storage location of LevelDB files.
    dir = "journal"
    # Use fsync on write.
    fsync = on
    # Verify checksum on read.
    checksum = off
    # Native LevelDB (via JNI) or LevelDB Java port.
    native = on
  }
}

akka.persistence.journal.proxy {
  # Class name of the plugin.
  class = "akka.persistence.journal.PersistencePluginProxy"
  # Dispatcher for the plugin actor.
  plugin-dispatcher = "akka.actor.default-dispatcher"
  # Set this to on in the configuration of the ActorSystem
  # that will host the target journal
  start-target-journal = off
  # The journal plugin config path to use for the target journal
  target-journal-plugin = ""
  # The address of the proxy to connect to from other nodes. Optional setting.
  target-journal-address = ""
  # Initialization timeout of target lookup
  init-timeout = 10s
}
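
# Example (a sketch, with a hypothetical address): nodes that should use a
# journal hosted elsewhere via the proxy could be configured with
#   akka.persistence.journal.plugin = "akka.persistence.journal.proxy"
#   akka.persistence.journal.proxy {
#     target-journal-plugin = "akka.persistence.journal.leveldb"
#     target-journal-address = "akka.tcp://Sys@host1:2552"
#   }
# while the hosting node sets start-target-journal = on instead.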

akka.persistence.snapshot-store.proxy {
  # Class name of the plugin.
  class = "akka.persistence.journal.PersistencePluginProxy"
  # Dispatcher for the plugin actor.
  plugin-dispatcher = "akka.actor.default-dispatcher"
  # Set this to on in the configuration of the ActorSystem
  # that will host the target snapshot-store
  start-target-snapshot-store = off
  # The snapshot-store plugin config path to use for the target snapshot-store
  target-snapshot-store-plugin = ""
  # The address of the proxy to connect to from other nodes. Optional setting.
  target-snapshot-store-address = ""
  # Initialization timeout of target lookup
  init-timeout = 10s
}

# LevelDB persistence requires the following dependency declarations:
#
# SBT:
#   "org.iq80.leveldb"          % "leveldb"        % "0.7"
#   "org.fusesource.leveldbjni" % "leveldbjni-all" % "1.8"
#
# Maven:
#   <dependency>
#     <groupId>org.iq80.leveldb</groupId>
#     <artifactId>leveldb</artifactId>
#     <version>0.7</version>
#   </dependency>
#   <dependency>
#     <groupId>org.fusesource.leveldbjni</groupId>
#     <artifactId>leveldbjni-all</artifactId>
#     <version>1.8</version>
#   </dependency>