ColossalAI/colossalai/legacy/inference/serving/torch_serve/config.properties
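
# TorchServe configuration for ColossalAI's legacy torch_serve inference serving example.
# The *_address entries below bind TorchServe's inference, management, and
# metrics APIs to all interfaces on ports 8084, 8085, and 8086.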

inference_address=http://0.0.0.0:8084
management_address=http://0.0.0.0:8085
metrics_address=http://0.0.0.0:8086
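# Allow settings to be overridden via environment variables and install each
# model archive's bundled Python requirements when the model is loaded.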
enable_envvars_config=true
install_py_dep_per_model=true
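# Give model workers one GPU and load every archive found in the model store at startup.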
number_of_gpu=1
load_models=all
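# Maximum response payload size in bytes and default per-request timeout in
# seconds, both raised above the stock TorchServe defaults.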
max_response_size=655350000
default_response_timeout=6000
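# Directory holding the packaged model archives (.mar files).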
model_store=./model_store
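
# Example launch (a sketch; assumes the command is run from this directory with
# a model archive already placed in ./model_store):
#   torchserve --start --ts-config config.properties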