Ever thought of using a GPT model for running Kubernetes?

Generating Kubernetes commands with the help of OpenAI's GPT-3 model.

Let's start by understanding what GPT is.

Let’s look at the code for predicting k8s commands.

"""Creates the Example and GPT classes for a user to interface with the OpenAI API."""import openaidef set_openai_key(key):
"""Sets OpenAI key."""
openai.api_key = key
class Example:
    """Stores an input, output pair and formats it to prime the model."""

    def __init__(self, inp, out):
        """Store one priming example.

        Args:
            inp: Natural-language request text (e.g. a description of a task).
            out: The exact output expected for that request (e.g. a command).
        """
        self.input = inp
        self.output = out

    def get_input(self):
        """Return the input of the example."""
        return self.input

    def get_output(self):
        """Return the intended output of the example."""
        return self.output

    def format(self):
        """Format the pair as the "input: ...\\noutput: ...\\n" prompt snippet."""
        return f"input: {self.input}\noutput: {self.output}\n"
class GPT:
    """The main class for a user to interface with the OpenAI API.

    A user can add Example objects (which prime the model) and set the
    engine, temperature and max_tokens parameters of the API request.
    """

    # NOTE(review): the pasted source truncated this signature after
    # `engine='davinci',`; the defaults below are reconstructed from the
    # upstream gpt3-sandbox project this code is derived from — confirm.
    def __init__(self, engine='davinci', temperature=0.5, max_tokens=100):
        """Initialize request parameters and an empty example store.

        Args:
            engine: Name of the OpenAI completion engine to query.
            temperature: Sampling temperature passed to the API.
            max_tokens: Maximum number of tokens the API may generate.
        """
        self.examples = []
        self.engine = engine
        self.temperature = temperature
        self.max_tokens = max_tokens

    def add_example(self, ex):
        """Add an example to the object.

        Example must be an instance of the Example class.
        """
        assert isinstance(ex, Example), "Please create an Example object."
        # Store the pre-formatted "input:/output:" text, not the object itself,
        # so get_prime_text() can simply join strings. (This append was lost
        # in the pasted source; without it the priming text is always empty.)
        self.examples.append(ex.format())

    def get_prime_text(self):
        """Format all examples into a single priming string for the model."""
        return '\n'.join(self.examples) + '\n'

    def get_engine(self):
        """Return the engine specified for the API."""
        return self.engine

    def get_temperature(self):
        """Return the temperature specified for the API."""
        return self.temperature

    def get_max_tokens(self):
        """Return the max tokens specified for the API."""
        return self.max_tokens

    def craft_query(self, prompt):
        """Prepend the priming examples to the user's prompt."""
        return self.get_prime_text() + "input: " + prompt + "\n"

    def submit_request(self, prompt):
        """Call the OpenAI API with the specified parameters.

        NOTE(review): the keyword arguments after `engine=` were truncated in
        the pasted source; they are reconstructed from the upstream
        gpt3-sandbox project — confirm against the deployed copy.
        """
        response = openai.Completion.create(engine=self.get_engine(),
                                            prompt=self.craft_query(prompt),
                                            max_tokens=self.get_max_tokens(),
                                            temperature=self.get_temperature(),
                                            top_p=1,
                                            frequency_penalty=0,
                                            presence_penalty=0,
                                            stop="\ninput:")
        return response

    def get_top_reply(self, prompt):
        """Obtain the best result as returned by the API."""
        response = self.submit_request(prompt)
        return response['choices'][0]['text']
#!/usr/bin/python3
"""CGI endpoint: turn a natural-language request into a kubectl command via GPT-3."""
# CGI response header. The blank line after it is required by the CGI spec to
# separate headers from the body (it was missing in the pasted source).
print("content-type: text/html")
print()

import cgi  # NOTE(review): deprecated since Python 3.11, removed in 3.13.
import subprocess
import json

import openai

from gpt import GPT
from gpt import Example

# Set the GPT engine.
# NOTE(review): the pasted source truncated this call after `engine="davinci",`;
# temperature/max_tokens below mirror the GPT class defaults — confirm.
gpt = GPT(engine="davinci",
          temperature=0.5,
          max_tokens=100)

# Prime the model with natural-language -> kubectl command pairs.
gpt.add_example(Example('Launch a myweb deployment with httpd image.',
                        'kubectl create deployment myweb --image=httpd'))
gpt.add_example(Example('Run a test deployment with vimal13/apache-webserver-php as image',
                        'kubectl create deployment test --image=vimal13/apache-webserver-php'))
gpt.add_example(Example('Run a webapptest deployment with vimal13/apache-webserver-php as image',
                        'kubectl create deployment webapptest --image=vimal13/apache-webserver-php'))
gpt.add_example(Example('Run a webapptesting deployment with httpd as image',
                        'kubectl create deployment webapptesting --image=httpd'))
gpt.add_example(Example('Launch a deployment with name as webapp and image as httpd',
                        'kubectl create deployment webapp --image=httpd'))
gpt.add_example(Example('Create a pod with name as testing and image as httpd',
                        'kubectl run testing --image=httpd'))
gpt.add_example(Example('Launch a pod with webpod as name and vimal13/apache-webserver-php as image',
                        'kubectl run webpod --image=vimal13/apache-webserver-php'))
gpt.add_example(Example('Launch a pod with webtest as name and httpd as image',
                        'kubectl run webtest --image=httpd'))
gpt.add_example(Example('Delete deployment with name test',
                        'kubectl delete deployment test'))
gpt.add_example(Example('Delete deployment with name webapp',
                        'kubectl delete deployment webapp'))
gpt.add_example(Example('Delete a pod with name webtest',
                        'kubectl delete pod webtest'))
gpt.add_example(Example('Expose the deployment test as NodePort type and on port 80',
                        'kubectl expose deployment test --port=80 --type=NodePort'))
gpt.add_example(Example('Expose the deployment webtest as External LoadBalancer type and on port 80',
                        'kubectl expose deployment webtest --port=80 --type=LoadBalancer'))
gpt.add_example(Example('Expose the deployment webapp as ClusterIP type and on port 80',
                        'kubectl expose deployment webapp --port=80 --type=ClusterIP'))
gpt.add_example(Example('Create 5 replicas of test deployment',
                        'kubectl scale deployment test --replicas=5'))
gpt.add_example(Example('Create 3 replicas of webapp deployment',
                        'kubectl scale deployment webapp --replicas=3'))
gpt.add_example(Example('Delete all resources of Kubernetes',
                        'kubectl delete all --all'))
gpt.add_example(Example('Get the list of deployments',
                        'kubectl get deployments'))
gpt.add_example(Example('Get the list of services',
                        'kubectl get svc'))
gpt.add_example(Example('List all the pods',
                        'kubectl get pods'))

# Read the user's natural-language request from the 'x' query parameter.
f = cgi.FieldStorage()
prompt = f.getvalue('x')

# Get the prediction. The completion echoes "output: <command>"; keep only
# the command. maxsplit=1 so commands containing ':' (e.g. image tags like
# httpd:latest) are not truncated at the second colon.
output = gpt.submit_request(prompt)
res = output.choices[0].text
cmd = res.split("output")[1].split(":", 1)[1].strip()
cmd = cmd + " --kubeconfig /root/kubews/admin.conf"
# SECURITY: this executes model-generated text through a shell, as root.
# A crafted prompt can inject arbitrary commands — validate `cmd` against a
# whitelist (e.g. require it to start with 'kubectl ') before running it.
output = subprocess.getoutput('sudo ' + cmd)