diff --git a/python-jobs/Dockerfile b/python-jobs/Dockerfile
new file mode 100644
index 0000000..c89aecc
--- /dev/null
+++ b/python-jobs/Dockerfile
@@ -0,0 +1,2 @@
+FROM python:3.12-alpine
+COPY pipeline.py .
diff --git a/python-jobs/justfile b/python-jobs/justfile
new file mode 100644
index 0000000..d47509c
--- /dev/null
+++ b/python-jobs/justfile
@@ -0,0 +1,9 @@
+build:
+    podman build . -t registry.wayl.one/dummypipe:alpine
+push:
+    podman push registry.wayl.one/dummypipe:alpine
+create-ns:
+    kubectl create ns jobrunner
+
+regcred:
+    kubectl get secret -n default regcred --output=yaml -o yaml | sed 's/namespace: default/namespace: jobrunner/' | kubectl apply -n jobrunner -f - && echo deployed secret || echo secret exists
diff --git a/python-jobs/pipeline.py b/python-jobs/pipeline.py
new file mode 100644
index 0000000..493b54e
--- /dev/null
+++ b/python-jobs/pipeline.py
@@ -0,0 +1,14 @@
+import time
+import logging
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+logger.addHandler(logging.StreamHandler())
+
+# log to ./log.log
+logger.addHandler(logging.FileHandler("./log.log"))
+
+for node in range(120):
+    print(f"running {node+1} of 120")
+    time.sleep(5)
+    logger.info(f"done {node}")
diff --git a/python-jobs/run_pipeline.py b/python-jobs/run_pipeline.py
new file mode 100644
index 0000000..20485dd
--- /dev/null
+++ b/python-jobs/run_pipeline.py
@@ -0,0 +1,88 @@
+# from kubernetes import client, config
+
+# # Load the kubeconfig file
+# config.load_kube_config()
+
+# # Create an instance of the Kubernetes API client
+# api_instance = client.BatchV1Api()
+
+# # Define the job specification
+# job_spec = {
+#     "template": {
+#         "metadata": {"labels": {"app": "python-pipeline"}},
+#         "spec": {
+#             "containers": [
+#                 {
+#                     "name": "python-pipeline",
+#                     "image": "dummypipe",
+#                     "resources": {
+#                         "requests": {"memory": "512Mi"},
+#                         "limits": {"memory": "1Gi"},
+#                     },
+#                     "cpu": {"requests": "0.5", "limits": "1"},
+#                     "command": ["python", "pipeline.py"],
+#                 }
+#             ]
+#         },
+#     },
+# }
+
+# # Create the job object
+# job = client.V1Job(
+#     api_version="apps/v1",
+#     kind="Job",
+#     metadata=client.V1ObjectMeta(name="python-pipeline"),
+#     spec=client.V1JobSpec(**job_spec),
+# )
+
+# # Create the job in the jobrunner namespace
+# api_instance.create_namespaced_job(namespace="jobrunner", body=job)
+
+from kubernetes import client, config
+import string
+import random
+
+# Load the default kubeconfig
+config.load_kube_config()
+
+# Define the API client for batch jobs
+api_instance = client.BatchV1Api()
+
+
+def get_random_string(length):
+    # choose from all lowercase letter
+    letters = string.ascii_lowercase
+    result_str = "".join(random.choice(letters) for i in range(length))
+    return result_str
+
+
+# Create a new job object
+job = client.V1Job(
+    api_version="batch/v1",
+    kind="Job",
+    metadata=client.V1ObjectMeta(name=f"myjob{get_random_string(5)}"),
+    spec=client.V1JobSpec(
+        ttl_seconds_after_finished=100,
+        template=client.V1PodTemplateSpec(
+            metadata=client.V1ObjectMeta(
+                labels={"app": f"myjobspod{get_random_string(5)}"}
+            ),
+            spec=client.V1PodSpec(
+                containers=[
+                    client.V1Container(
+                        name=f"myjobrunnercontainer{get_random_string(5)}",
+                        image="registry.wayl.one/dummypipe:alpine",
+                        command=["python", "pipeline.py"],
+                        image_pull_policy="Always",
+                    ),
+                ],
+                restart_policy="Never",
+                image_pull_secrets=[client.V1LocalObjectReference(name="regcred")],
+            ),
+        ),
+        backoff_limit=1,
+    ),
+)
+
+# Call the Kubernetes API to create the job
+api_instance.create_namespaced_job(namespace="jobrunner", body=job)