Example simudyneSDK.properties

Last updated on 26th March 2024

The Simudyne SDK typically requires a simudyneSDK.properties file to configure everything from Parquet output and the server port to random number generation. Below is a sample, deliberately verbose, properties file for you to make use of.
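The file is typically placed at the root of your project (the working directory at launch) so the SDK picks it up automatically.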

simudyneSDK.properties

### SimudyneSDK Configuration file

### NEXUS-SERVER ###

nexus-server.port = 8080
nexus-server.hostname = 127.0.0.1
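# With the two settings above, the console should be reachable at http://127.0.0.1:8080/.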
# nexus-server.webserver-root = console/src/main/resources/webapp
# nexus-server.autocompile-root=sandbox/src/main/scala/sandbox/models
# nexus-server.parallel-nexus-limit=2
# nexus-server.nexus-lifetime = 10
# nexus-server.run-past-end = false
# nexus-server.rate-limit = 5
# nexus-server.batchrun-tick-limit = true
# nexus-server.batchrun-runs-limit = 100
# nexus-server.batchrun-lifetime = 10
# nexus-server.parallel-batchrun-limit
# nexus-server.health-check = false
# nexus-server.health-check-ticks = 5
# console.scenario-file = scenario/testScenario.json

### CORE ###

# core.prng-seed = 1640702558671097951
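# Uncomment core.prng-seed and fix its value to make runs reproducible;
# otherwise a fresh seed is generated for each run.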
# core.uiconfig.readonly=false
core.export-path=output
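# File-based exporters (CSV, Parquet, JSON) write beneath core.export-path;
# SQL and Hive exports use the dedicated paths below instead.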
# core.export.username=default
# core.export.password=work12345
# core.csv-export.enabled=true
# core.parquet-export.enabled=true
# core.sql-export.enabled=true
# core.sql-export-path=jdbc:mysql://localhost:3306/sdk
# core.json-export.enabled=true
# feature.interactive-parquet-output=true
# core.hive-export.enabled=false
# core.hive-export-path=hive2://localhost:10000/default

# For output-interval, choose one of: {default (output every step), abridged (setup, kickoff, and end, per @ModelSettings), N (output every Nth step)}
core.export.output-interval=default
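# e.g. core.export.output-interval=10 exports output on every 10th step only.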

# core.export.folder-structure=group-by-run
core.export.folder-structure=group-by-type
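# group-by-run keeps each run's output in its own folder, whereas
# group-by-type groups the output of all runs by output type.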

### CORE-ABM ###

core-abm.max-messaging-phases = 50
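# This caps the number of message-passing phases allowed within a single tick;
# raise it if your agents exchange longer chains of messages per step.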

# For serialization-level, choose between: {NONE, CHECKED}

# core-abm.serialize.sections=true
# core-abm.serialize.activities=true
core-abm.serialize.agents=true
# core-abm.serialize.links=true
core-abm.serialize.accumulators=true
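# The serialize.* flags above control which parts of model state (sections,
# activities, agents, links, accumulators) are serialized for the console and output.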
# core-abm.sort-inboxes=true
# core-abm.local-parallelism=
# core-abm.debug=false
akka.log-dead-letters-during-shutdown = false

### CORE-ABM-SPARK ###

# Uncomment the following line to enable the Spark backend as the default.

# core-abm.backend-implementation=simudyne.core.graph.spark.SparkGraphBackend

# Default Spark settings. Comment out these lines if you will provide the
# configuration via spark-submit or similar; otherwise these settings are required.

# For log-level, choose between: {OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL}

core-abm-spark.master-url = local[*]
# core-abm-spark.spark.executor.memory = 2g
# core-abm-spark.spark.sql.shuffle.partitions = 24
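# Additional spark.* settings can likewise be supplied with the core-abm-spark.
# prefix, e.g. core-abm-spark.spark.driver.memory = 4g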
core-abm-spark.checkpoint-directory = /var/tmp
core-abm-spark.log-level = WARN

### CORE-MC-SPARK ###
# Uncomment the following line to enable the Spark runner as the default for multi-run simulations.

# core-runner.runner-backend = simudyne.core.exec.runner.spark.SparkRunnerBackend

# Default Spark settings. Comment out these lines if you will provide the
# configuration via spark-submit or similar; otherwise these settings are required.

# For log-level, choose between: {OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL}

core-runner-spark.master-url = local[*]
# core-runner-spark.executor.memory = 2g
# core-runner-spark.partitions = 24
core-runner-spark.log-level = WARN
# core-runner-spark.task.cpus = 1
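# task.cpus above controls how many CPU cores each parallel run claims.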

### CORE-ABM DISTRIBUTED (EXPERIMENTAL) ###
# core-abm.backend-implementation=simudyne.core.graph.experimental.dig.treelike.backend.NoClusterBackend
# core-abm.backend-implementation=simudyne.core.graph.experimental.dig.treelike.backend.SubprocessBackend
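# Enable at most one backend-implementation at a time; the two lines above
# select the experimental no-cluster and subprocess backends respectively.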
# core.graph.experimental.clusterSize=3
# core.graph.experimental.timeouts.base=240
# core.graph.experimental.distributed.log-level = ALL
# feature.immutable-schema=false

### CORE-GRAPH DISTRIBUTED (EXPERIMENTAL) ###
core-graph-akka.master-url = local[*]
# core-graph-akka.executor.memory = 2g
# core-graph-akka.partitions = 24
# core-graph-akka.task.cpus = 1