Get active and pending instances.

Example request:
curl --request GET \
--url https://api.shadeform.ai/v1/instances \
--header 'X-API-KEY: <api-key>'
Example response:
{
"instances": [
{
"id": "d290f1ee-6c54-4b01-90e6-d701748f0851",
"cloud": "hyperstack",
"region": "canada-1",
"shade_instance_type": "A6000",
"cloud_instance_type": "gpu_1x_a6000",
"cloud_assigned_id": "13b057d7-e266-4869-985f-760fe75a78b3",
"shade_cloud": true,
"name": "cool-gpu-server",
"configuration": {
"memory_in_gb": 12,
"storage_in_gb": 256,
"vcpus": 6,
"num_gpus": 1,
"gpu_type": "A100",
"interconnect": "pcie",
"vram_per_gpu_in_gb": 48,
"os": "ubuntu_22_shade_os"
},
"ip": "1.0.0.1",
"ssh_user": "shadeform",
"ssh_port": 22,
"status": "active",
"cost_estimate": "103.4",
"hourly_price": 210,
"launch_configuration": {
"type": "docker",
"docker_configuration": {
"image": "vllm/vllm-openai:latest",
"args": "--model mistralai/Mistral-7B-v0.1",
"shared_memory_in_gb": 8,
"envs": [
{
"name": "HUGGING_FACE_HUB_TOKEN",
"value": "hugging_face_api_token"
}
],
"port_mappings": [
{
"host_port": 80,
"container_port": 8000
}
],
"volume_mounts": [
{
"host_path": "~/.cache/huggingface",
"container_path": "/root/.cache/huggingface"
}
]
},
"script_configuration": {
"base64_script": "IyEvYmluL2Jhc2gKCiMgRW5kbGVzcyBsb29wCndoaWxlIHRydWUKZG8KICAgICMgRmV0Y2ggYSBjYXQgZmFjdCB3aXRoIGEgbWF4aW11bSBsZW5ndGggb2YgMTQwIGNoYXJhY3RlcnMKICAgIGN1cmwgLS1uby1wcm9ncmVzcy1tZXRlciBodHRwczovL2NhdGZhY3QubmluamEvZmFjdD9tYXhfbGVuZ3RoPTE0MAoKICAgICMgUHJpbnQgYSBuZXdsaW5lIGZvciByZWFkYWJpbGl0eQogICAgZWNobwoKICAgICMgU2xlZXAgZm9yIDMgc2Vjb25kcyBiZWZvcmUgdGhlIG5leHQgaXRlcmF0aW9uCiAgICBzbGVlcCAzCmRvbmUKCgo="
}
},
"port_mappings": [
{
"internal_port": 8000,
"external_port": 80
}
],
"created_at": "2016-08-29T09:12:33.001Z",
"deleted_at": "2016-08-29T09:12:33.001Z"
}
]
}
Returns an InstancesResponse object. The response is of type object.
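For reference, here is a minimal Python sketch of the same request. It assumes the requests package is installed and that the API key is exported in an environment variable; the SHADEFORM_API_KEY variable name is an illustrative choice, not something the API prescribes. The sketch lists each instance and, where a script-based launch configuration is present, decodes the base64_script field back into the original startup script.

import base64
import os

import requests

API_URL = "https://api.shadeform.ai/v1/instances"
api_key = os.environ["SHADEFORM_API_KEY"]  # illustrative variable name

resp = requests.get(API_URL, headers={"X-API-KEY": api_key})
resp.raise_for_status()

for inst in resp.json().get("instances", []):
    # Basic identity and status, mirroring the fields in the example response.
    print(f"{inst['name']} ({inst['shade_instance_type']}, "
          f"{inst['cloud']}/{inst['region']}): {inst['status']}")

    # launch_configuration.script_configuration.base64_script, when present,
    # holds the base64-encoded startup script supplied at launch time.
    script_cfg = inst.get("launch_configuration", {}).get("script_configuration") or {}
    if script_cfg.get("base64_script"):
        print(base64.b64decode(script_cfg["base64_script"]).decode())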