# ODP runtime configuration options
#
# This template configuration file (odp-linux-generic.conf) is hardcoded
# during the configure/build phase and the values defined here are used if
# the optional ODP_CONFIG_FILE is not set. This configuration file MUST
# include all configuration options.
#
# ODP_CONFIG_FILE can be used to override default values and it does not
# have to include all available options. The missing options are
# replaced with hardcoded default values.
#
# The options defined here are implementation specific and valid option
# values should be checked from the implementation code.
#
# See libconfig syntax: https://hyperrealm.github.io/libconfig/libconfig_manual.html#Configuration-Files

# Mandatory fields
odp_implementation = "linux-generic"
config_file_version = "0.1.15"

# System options
system: {
    # CPU frequency value returned by odp_cpu_hz() and odp_cpu_hz_id()
    # calls on platforms where the frequency isn't available using standard
    # Linux methods.
    cpu_mhz = 0

    # CPU max frequency value returned by odp_cpu_hz_max() and
    # odp_cpu_hz_max_id() calls on platforms where the max frequency isn't
    # available using standard Linux methods.
    cpu_mhz_max = 1400

    # Maximum number of ODP threads that can be created.
    # odp_thread_count_max() returns this value or the build time
    # maximum ODP_THREAD_COUNT_MAX, whichever is lower. This setting
    # can be used to reduce thread related resource usage.
    thread_count_max = 256
}

# Shared memory options
shm: {
    # Number of cached default size huge pages. These pages are allocated
    # during odp_init_global() and freed back to the kernel in
    # odp_term_global(). A value of zero means no pages are cached.
    # Do not use negative values here; they are reserved for future
    # implementations.
    #
    # ODP will reserve as many huge pages as possible, which may be less
    # than requested here if the system does not have enough huge pages
    # available.
    #
    # When using process mode threads, this value should be set to 0
    # because the current implementation won't work properly otherwise.
    num_cached_hp = 0

    # Huge page usage limit in kilobytes. Memory reservations larger than
    # this value are done using huge pages (if available). Smaller
    # reservations are done using normal pages to conserve memory.
    huge_page_limit_kb = 64

    # Amount of memory pre-reserved for ODP_SHM_SINGLE_VA usage in kilobytes
    single_va_size_kb = 262144
}

# Pool options
pool: {
    # Default thread local cache size. The cache size in pool parameters is
    # initialized to this value. The value must be a multiple of burst_size
    # (min 2 x burst_size).
    #
    # The total maximum number of cached events is the number of threads
    # using the pool multiplied by local_cache_size.
    local_cache_size = 256

    # Transfer size between the local cache and the global pool. Must be
    # larger than zero.
    burst_size = 32

    # Packet pool options
    pkt: {
        # Maximum packet data length in bytes
        max_len = 65536

        # Maximum number of packets per pool. A power of two minus one
        # results in optimal memory usage (e.g. (256 * 1024) - 1).
        max_num = 262143

        # Base alignment for segment data. When set to zero,
        # cache line size is used. Use power of two values. This is
        # also the maximum value for the packet pool alignment param.
        base_align = 0
    }

    buf: {
        # Minimum data alignment. The alignment request in pool
        # parameters is rounded up to this value. When set to zero,
        # cache line size is used. Use power of two values.
        min_align = 0
    }
}
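# Example: a minimal ODP_CONFIG_FILE override (hypothetical values) that
# tunes only the pool cache settings above. The mandatory fields are
# assumed to be required in an override file as well; every other option
# falls back to the hardcoded defaults. Note that local_cache_size must
# stay a multiple of burst_size (min 2 x burst_size).
#
#   odp_implementation = "linux-generic"
#   config_file_version = "0.1.15"
#
#   pool: {
#       local_cache_size = 64
#       burst_size = 32
#   }
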
# General pktio options
pktio: {
    # Frame start offset from the packet base pointer at packet input. This
    # can be used (together with the pool.pkt.base_align option) to tune
    # packet data alignment for received frames. Currently, packet IO
    # drivers that do not copy data (zero-copy DPDK, loop and ipc) ignore
    # this option.
    pktin_frame_offset = 0
}

# DPDK pktio options
pktio_dpdk: {
    # Default options
    num_rx_desc = 128
    num_tx_desc = 512
    rx_drop_en = 0

    # Store RX RSS hash result as ODP flow hash
    set_flow_hash = 0

    # Driver specific options (use PMD names from DPDK)
    net_ixgbe: {
        rx_drop_en = 1
    }
}

# netmap pktio options
pktio_netmap: {
    # Interface specific options
    virt: {
        nr_rx_slots = 0
        nr_tx_slots = 0
    }
}

queue_basic: {
    # Maximum queue size. The value must be a power of two.
    max_queue_size = 8192

    # Default queue size. The value must be a power of two.
    default_queue_size = 4096
}

sched_basic: {
    # Priority level spread
    #
    # Each priority level is spread into multiple scheduler internal queues.
    # This value defines the number of those queues. The minimum value is 1.
    # Each thread prefers one of the queues over the others. A higher
    # spread value typically improves parallelism and is thus better for
    # high thread counts, but causes an uneven service level for low thread
    # counts. Typically, the optimal value is the number of threads using
    # the scheduler.
    prio_spread = 4

    # Weight of the preferred scheduler internal queue
    #
    # Each thread prefers one of the internal queues over the others.
    # This value controls how many times the preferred queue is polled
    # between polls to another internal queue. The minimum value is 1. A
    # higher value typically improves parallelism as threads work mostly on
    # their preferred queues, but causes an uneven service level for low
    # thread counts as non-preferred queues are served less often.
    prio_spread_weight = 63

    # Burst size configuration per priority. The first array element
    # represents the highest queue priority. The scheduler tries to get
    # burst_size_default[prio] events from a queue and stashes those that
    # cannot be passed to the application immediately. More events than the
    # default burst size may be returned from an application request, but no
    # more than burst_size_max[prio].
    #
    # Large burst sizes improve throughput, but decrease application
    # responsiveness to higher priority events due to head of line blocking
    # caused by a burst of lower priority events.
    burst_size_default = [ 32,  32,  32,  32,  32,  16,   8,   4]
    burst_size_max     = [255, 255, 255, 255, 255,  16,  16,   8]

    # Automatically updated schedule groups
    #
    # DEPRECATED: use the odp_schedule_config() API instead
    #
    # The API specification defines that ODP_SCHED_GROUP_ALL,
    # _WORKER and _CONTROL are updated automatically. These options can be
    # used to disable these groups when they are not used. Set the value to
    # 0 to disable a group. Performance may improve when unused groups are
    # disabled.
    group_enable: {
        all     = 1
        worker  = 1
        control = 1
    }
}
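# Example: a hypothetical sched_basic override for a 4-thread scheduler
# (matching the prio_spread guidance above) that trades some throughput for
# better responsiveness to high priority events by using smaller bursts on
# the lower priority levels. All values are illustrative only.
#
#   sched_basic: {
#       prio_spread = 4
#       burst_size_default = [32, 32, 32, 32, 16,  8,  4,  2]
#       burst_size_max     = [64, 64, 64, 64, 32, 16,  8,  4]
#   }
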
timer: {
    # Use inline timer implementation
    #
    # By default, timer processing is done in background threads (one thread
    # per timer pool). With the inline implementation, timers are processed
    # by ODP application threads instead. When using inline timers the
    # application has to call odp_schedule() or odp_queue_deq() regularly to
    # actuate timer processing.
    #
    # 0: Use POSIX timers and background threads to process timers
    # 1: Use the inline timer implementation and application threads to
    #    process timers
    inline = 0

    # Inline timer poll interval
    #
    # When set to 1, inline timers are polled during every schedule round.
    # Increasing the value reduces timer processing overhead while
    # decreasing accuracy. Ignored when inline timers are not used.
    inline_poll_interval = 10

    # Inline timer poll interval in nanoseconds
    #
    # When inline_poll_interval is larger than 1, use this option to limit
    # the inline timer polling rate in nanoseconds. By default, this defines
    # the maximum rate at which a thread may poll timers. If a timer pool is
    # created with a higher resolution than this, the polling rate is
    # increased accordingly. Ignored when inline timers are not used.
    inline_poll_interval_nsec = 500000

    # Inline timer use of threads
    #
    # Select which thread types process non-private timer pools in the
    # inline timer implementation. Thread type does not affect private timer
    # pool processing; those pools are always processed by the thread that
    # created the pool. Ignored when inline timers are not used.
    #
    # 0: Both control and worker threads process non-private timer pools
    # 1: Only worker threads process non-private timer pools
    # 2: Only control threads process non-private timer pools
    inline_thread_type = 0
}
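# Example: a hypothetical ODP_CONFIG_FILE override that enables inline
# timers, so that application (worker) threads calling odp_schedule() also
# drive timer expiration. The values below are illustrative, not
# recommendations; the poll interval options only take effect when
# inline = 1.
#
#   timer: {
#       inline = 1
#       inline_poll_interval = 10
#       inline_poll_interval_nsec = 500000
#       inline_thread_type = 1
#   }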