#!/sbin/openrc-run
# OpenRC service script for llama-server (llama.cpp HTTP inference server).
# All runtime arguments (model path, host/port, ...) must be supplied via
# command_args in /etc/conf.d/llama-server — see start_pre below.

description="HTTP Server for LLM inference"
command=/usr/bin/llama-server
# Run unprivileged; conf.d may override the user:group.
: ${command_user:=llama-server:llama-server}

# Forward the daemon's stdout/stderr to syslog. stderr is tagged at
# err priority (was daemon.info, which made errors indistinguishable
# from normal output when filtering by severity).
output_logger="logger -t llama-server -p daemon.info"
error_logger="logger -t llama-server -p daemon.err"

# Refuse to start unless the admin has configured command_args
# (model, listen address, ...) in /etc/conf.d/llama-server.
start_pre() {
	[ -n "${command_args}" ] && return 0
	eerror "command_args not specified in /etc/conf.d/llama-server"
	return 1
}

# Run under OpenRC's supervise-daemon and forbid the service
# from acquiring new privileges after startup.
supervisor=supervise-daemon
no_new_privs=yes