Commit 0ee46f17 authored by Tim Bleimehl's avatar Tim Bleimehl 🤸🏼
Browse files

wip

parent a8c3e1cc
......@@ -18,8 +18,6 @@ log = logging.getLogger("databasebackupper")
Todo:
Refactor:
* tidy up, simplify
"""
......
......@@ -45,3 +45,6 @@ class DEFAULT(ConfigBase):
DOCKER_COMMAND: str = "docker"
KUBECTL_COMMAND: str = "kubectl"
KUBECTL_NAMESPACE_PARAM: str = "--all-namespaces"
KUBECTL_DEFAULT_NAMESPACE: str = "default"
# Where should we search for Workloads that could contain a running Database Pod
KUBECTL_VALID_WORKLOAD_TYPES: List[str] = ["Deployment", "StatefulSet"]
from typing import List, Dict
from ast import Str
from typing import List, Dict, Union, Tuple
import logging
from Configs import getConfig
import json
from dataclasses import dataclass
from CoDaBackup.config import DEFAULT
from CoDaBackup.label import ValidLabels, Label
......@@ -10,19 +12,133 @@ from CoDaBackup.executer import Executer
# Module-level singletons: typed config object and the shared backup logger.
config: DEFAULT = getConfig(config_classes=[DEFAULT])
log = logging.getLogger("databasebackupper")
# TODO: You are here. Make the docker* func work with new Container class
@dataclass
class Container:
    """Runtime-agnostic description of a container/pod that may be backed up."""

    # Unique id of the pod/container (kubernetes: metadata.uid)
    uid: str
    # Human readable name (kubernetes: metadata.name)
    name: str
    # Labels this backup tool understands (members of ValidLabels)
    backup_labels: List[Label]
    # All remaining labels attached to the pod/container
    other_labels: List[Label]
    # Raw description as returned by the runtime (e.g. `kubectl get ... -o json` item)
    desc: Dict = None
    # Parent workload description (Deployment/StatefulSet) when discovered via a workload
    parent: Dict = None
class ContainerHelper:
@classmethod
def kubernetes_get_pods(
    cls,
    labels: List[Label] = None,
    namespace: str = None,
    all_namespaces: bool = False,
    describe: bool = False,
) -> Union[List[str], List[Container]]:
    """Query kubectl for pods, optionally filtered by labels.

    Args:
        labels (List[Label], optional): Labels to select pods. Defaults to None,
            which applies no selector.
        namespace (str, optional): Kubernetes namespace to search in. Defaults to
            None, which falls back to ``config.KUBECTL_DEFAULT_NAMESPACE``.
        all_namespaces (bool, optional): If True search all namespaces; this
            ignores ``namespace``. Defaults to False.
        describe (bool, optional): If True return a list of ``Container``
            instances, otherwise just the uid of each pod. Defaults to False.

    Returns:
        Union[List[str], List[Container]]: Pod uids if ``describe`` is False,
        otherwise fully populated ``Container`` objects.
    """
    if all_namespaces:
        namespace_arg = "--all-namespaces"
    else:
        namespace_arg = (
            f"-n {namespace if namespace else config.KUBECTL_DEFAULT_NAMESPACE}"
        )
    # BUGFIX: with the default labels=None, joining over None raised TypeError.
    labels_str: str = ",".join([str(label) for label in labels]) if labels else ""
    selector_arg: str = f"--selector '{labels_str}'" if labels_str else ""
    pod_descs: Dict = json.loads(
        Executer.exec(
            f"{config.KUBECTL_COMMAND} get pods {namespace_arg} {selector_arg} -o json"
        )
    )["items"]
    pods: List[Container] = []
    # Hoist the valid-label collection out of the loop; previously Label(lbl)
    # was constructed twice per label and ValidLabels.iter() consumed repeatedly.
    valid_labels = list(ValidLabels.iter())
    for pod_desc in pod_descs:
        pod_labels = [Label(lbl) for lbl in pod_desc["metadata"]["labels"]]
        pods.append(
            Container(
                uid=pod_desc["metadata"]["uid"],
                name=pod_desc["metadata"]["name"],
                # Split labels into the ones this tool understands vs the rest.
                backup_labels=[lbl for lbl in pod_labels if lbl in valid_labels],
                other_labels=[lbl for lbl in pod_labels if lbl not in valid_labels],
                desc=pod_desc,
            )
        )
    if describe:
        return pods
    return [pod.uid for pod in pods]
@classmethod
def kubernetes_get_pods_to_be_backed_up(
    cls, label: str = None, namespace: str = None, all_namespaces: bool = False
) -> List[Container]:
    """Resolve backup-enabled workloads to their pods.

    Finds workloads marked for backup, then uses each workload's pod selector
    to locate the concrete pods running under it.

    Args:
        label (str, optional): Label selector identifying backup-enabled
            workloads. Defaults to None (the "enabled" ValidLabels marker).
        namespace (str, optional): Kubernetes namespace to search. Defaults to None.
        all_namespaces (bool, optional): Search all namespaces, ignoring
            ``namespace``. Defaults to False.

    Returns:
        List[Container]: Pods belonging to backup-enabled workloads, each with
        the parent workload's labels merged into ``backup_labels``.
    """
    workloads = cls.kubernetes_get_workloads_to_be_backed_up(
        label, namespace, all_namespaces
    )
    # BUGFIX: was annotated List[str] although it collects Container objects.
    pods: List[Container] = []
    for workload in workloads:
        if (
            "spec" in workload
            and "selector" in workload["spec"]
            # BUGFIX: key was misspelled "matchLables", so this branch never ran.
            and "matchLabels" in workload["spec"]["selector"]
        ):
            # The workload's spec.selector.matchLabels defines how pods running
            # under this workload are labeled; that lets us find its pods.
            labels: List[Label] = [
                Label(lbl) for lbl in workload["spec"]["selector"]["matchLabels"]
            ]
            # BUGFIX: describe=True is required — the default returns uid
            # strings, which have no backup_labels/parent attributes.
            wl_pods = cls.kubernetes_get_pods(
                labels=labels,
                namespace=namespace,
                all_namespaces=all_namespaces,
                describe=True,
            )
            for wl_pod in wl_pods:
                # Attach the workload's labels because these could contain the
                # metadata needed to access the database.
                wl_pod.backup_labels.extend(
                    [Label(lbl) for lbl in workload["metadata"]["labels"]]
                )
                # Attach the parent workload data for good measure; not in use yet.
                wl_pod.parent = workload
                # BUGFIX: extend(wl_pod) tried to iterate a single Container.
                pods.append(wl_pod)
    return pods
@classmethod
def kubernetes_get_workloads_to_be_backed_up(
    cls, label: str = None, namespace: str = None, all_namespaces: bool = False
) -> List[Dict]:
    """List workloads (Deployments, StatefulSets, ...) marked for backup.

    Args:
        label (str, optional): Label selector to match workloads. Defaults to
            None, which uses the "enabled" ValidLabels marker.
        namespace (str, optional): Kubernetes namespace to search. Defaults to
            None (``config.KUBECTL_DEFAULT_NAMESPACE``).
        all_namespaces (bool, optional): Search all namespaces, ignoring
            ``namespace``. Defaults to False.

    Returns:
        List[Dict]: Raw kubectl resource dicts whose ``kind`` is listed in
        ``config.KUBECTL_VALID_WORKLOAD_TYPES``.
    """
    if not label:
        label = str(ValidLabels.enabled)
    if all_namespaces:
        namespace_arg = "--all-namespaces"
    else:
        namespace_arg = (
            f"-n {namespace if namespace else config.KUBECTL_DEFAULT_NAMESPACE}"
        )
    # PERF BUGFIX: the kubectl command does not depend on the workload type, so
    # the previous per-type loop ran the identical subprocess once per entry in
    # KUBECTL_VALID_WORKLOAD_TYPES. Run it once and filter locally instead.
    results_items: List[Dict] = json.loads(
        Executer.exec(
            f"{config.KUBECTL_COMMAND} get all {namespace_arg} --selector={label} -o json"
        )
    )["items"]
    valid_types = set(config.KUBECTL_VALID_WORKLOAD_TYPES)
    return [item for item in results_items if item.get("kind") in valid_types]
@classmethod
def docker_get_container_to_be_backed_up(cls, label: str = None) -> List[str]:
......
......@@ -17,13 +17,17 @@ class Label:
def __init__(
self,
label: str,
type: Type,
label: Union[str, Dict[str, str]],
type: Type = str,
possible_values: List = None,
default: Any = None,
info: str = None,
):
key, *val = label.split("=")
if isinstance(label, Dict):
key = list(label.keys())[0]
val = list(label.values())[0]
else:
key, *val = label.split("=")
self.key: str = f"{self._BASE_LABEL}/{key}"
if type == bool and isinstance(val, str):
val = util.strtobool(val)
......@@ -40,10 +44,10 @@ class Label:
return hash(self.key)
def __str__(self):
    # Serialize to a plain "key=value" token so a joined list of labels can be
    # passed straight to kubectl's --selector argument (see kubernetes_get_pods).
    return f"{self.key}={self.val}"
def __repr__(self):
    # Debug representation; includes the value so labels are distinguishable in logs.
    return f"<Label '{self.key}={self.val}'>"
class ValidLabels:
......
......@@ -102,4 +102,21 @@ Thats it. We now have a directory `./backups/` in front of us, with all database
# Placeholder volume for docker.sock
VOLUME ["/var/run/docker.sock"]
# Placeholder volume for kubectl config
VOLUME ["/.kube/config"]
\ No newline at end of file
VOLUME ["/.kube/config"]
# Dev Notes
https://kubernetes.io/docs/tasks/run-application/run-single-instance-stateful-application/
Start a test Rancher K8s instance:
```
docker run -d \
--restart=unless-stopped \
-p 80:80 -p 443:443 \
--privileged \
--name rancher \
-v /var/run/docker.sock:/var/run/docker.sock \
rancher/rancher:stable
```
\ No newline at end of file
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment