Templates
/templates/save
Create a new template
POST /templates/save
curl --request POST \
--url https://api.shadeform.ai/v1/templates/save \
--header 'Content-Type: application/json' \
--header 'X-API-KEY: <api-key>' \
--data '{
"name": "My Template",
"description": "A template for ML workloads",
"public": true,
"launch_configuration": {
"type": "docker",
"docker_configuration": {
"image": "vllm/vllm-openai:latest",
"args": "--model mistralai/Mistral-7B-v0.1",
"shared_memory_in_gb": 8,
"envs": [
{
"name": "HUGGING_FACE_HUB_TOKEN",
"value": "hugging_face_api_token"
}
],
"port_mappings": [
{
"host_port": 80,
"container_port": 8000
}
],
"volume_mounts": [
{
"host_path": "~/.cache/huggingface",
"container_path": "/root/.cache/huggingface"
}
]
},
"script_configuration": {
"base64_script": "IyEvYmluL2Jhc2gKCiMgRW5kbGVzcyBsb29wCndoaWxlIHRydWUKZG8KICAgICMgRmV0Y2ggYSBjYXQgZmFjdCB3aXRoIGEgbWF4aW11bSBsZW5ndGggb2YgMTQwIGNoYXJhY3RlcnMKICAgIGN1cmwgLS1uby1wcm9ncmVzcy1tZXRlciBodHRwczovL2NhdGZhY3QubmluamEvZmFjdD9tYXhfbGVuZ3RoPTE0MAoKICAgICMgUHJpbnQgYSBuZXdsaW5lIGZvciByZWFkYWJpbGl0eQogICAgZWNobwoKICAgICMgU2xlZXAgZm9yIDMgc2Vjb25kcyBiZWZvcmUgdGhlIG5leHQgaXRlcmF0aW9uCiAgICBzbGVlcCAzCmRvbmUKCgo="
}
},
"auto_delete": {
"date_threshold": "2006-01-02T15:04:05-07:00",
"spend_threshold": "3.14"
},
"alert": {
"date_threshold": "2006-01-02T15:04:05-07:00",
"spend_threshold": "3.14"
},
"volume_mount": {
"auto": true
},
"tags": [
"ml",
"pytorch"
],
"envs": [
{
"name": "HUGGING_FACE_HUB_TOKEN",
"value": "hugging_face_api_token"
}
],
"networking": {
"ufw_rules": [
{
"rule": "allow",
"from_ip": "192.168.1.0/24",
"to_ip": "10.0.0.0/8",
"port": "80",
"proto": "tcp"
}
]
}
}'
{
"id": "d290f1ee-6c54-4b01-90e6-d701748f0851"
}
Authorizations
Body
application/json
Response
200 - application/json
Returns a TemplateCreateResponse object
Response from the /templates/save API call
curl --request POST \
--url https://api.shadeform.ai/v1/templates/save \
--header 'Content-Type: application/json' \
--header 'X-API-KEY: <api-key>' \
--data '{
"name": "My Template",
"description": "A template for ML workloads",
"public": true,
"launch_configuration": {
"type": "docker",
"docker_configuration": {
"image": "vllm/vllm-openai:latest",
"args": "--model mistralai/Mistral-7B-v0.1",
"shared_memory_in_gb": 8,
"envs": [
{
"name": "HUGGING_FACE_HUB_TOKEN",
"value": "hugging_face_api_token"
}
],
"port_mappings": [
{
"host_port": 80,
"container_port": 8000
}
],
"volume_mounts": [
{
"host_path": "~/.cache/huggingface",
"container_path": "/root/.cache/huggingface"
}
]
},
"script_configuration": {
"base64_script": "IyEvYmluL2Jhc2gKCiMgRW5kbGVzcyBsb29wCndoaWxlIHRydWUKZG8KICAgICMgRmV0Y2ggYSBjYXQgZmFjdCB3aXRoIGEgbWF4aW11bSBsZW5ndGggb2YgMTQwIGNoYXJhY3RlcnMKICAgIGN1cmwgLS1uby1wcm9ncmVzcy1tZXRlciBodHRwczovL2NhdGZhY3QubmluamEvZmFjdD9tYXhfbGVuZ3RoPTE0MAoKICAgICMgUHJpbnQgYSBuZXdsaW5lIGZvciByZWFkYWJpbGl0eQogICAgZWNobwoKICAgICMgU2xlZXAgZm9yIDMgc2Vjb25kcyBiZWZvcmUgdGhlIG5leHQgaXRlcmF0aW9uCiAgICBzbGVlcCAzCmRvbmUKCgo="
}
},
"auto_delete": {
"date_threshold": "2006-01-02T15:04:05-07:00",
"spend_threshold": "3.14"
},
"alert": {
"date_threshold": "2006-01-02T15:04:05-07:00",
"spend_threshold": "3.14"
},
"volume_mount": {
"auto": true
},
"tags": [
"ml",
"pytorch"
],
"envs": [
{
"name": "HUGGING_FACE_HUB_TOKEN",
"value": "hugging_face_api_token"
}
],
"networking": {
"ufw_rules": [
{
"rule": "allow",
"from_ip": "192.168.1.0/24",
"to_ip": "10.0.0.0/8",
"port": "80",
"proto": "tcp"
}
]
}
}'
{
"id": "d290f1ee-6c54-4b01-90e6-d701748f0851"
}
Assistant
Responses are generated using AI and may contain mistakes.