Commit aa582472 authored by Tim Bleimehl's avatar Tim Bleimehl 🤸🏼
Browse files

wip

parent f47e5b15
......@@ -191,6 +191,7 @@ class BackupManager:
self.generate_backup_filename(),
)
)
log.debug(f"Try create base dir {p.parent}")
p.parent.mkdir(parents=True, exist_ok=True)
return p
......
......@@ -38,7 +38,7 @@ class BaseBackupper:
db_container.kubernetes_namespace
if db_container.kubernetes_namespace
else "",
db_container.backup_name,
db_container.backup_name.replace("/", ""),
)
)
......@@ -49,6 +49,23 @@ class BaseBackupper:
if self.compress_backup
else self.backup_file_suffix,
)
self.manager.retention_duration = {
RetentionType.DAILY: db_container.coda_labels[
ValidLabels.retention_daily
].val,
RetentionType.WEEKLY: db_container.coda_labels[
ValidLabels.retention_weekly
].val,
RetentionType.MONTHLY: db_container.coda_labels[
ValidLabels.retention_monthly
].val,
RetentionType.YEARLY: db_container.coda_labels[
ValidLabels.retention_yearly
].val,
RetentionType.MANUAL: db_container.coda_labels[
ValidLabels.retention_manual
].val,
}
self.host = db_container.coda_labels[ValidLabels.database_host].val
self.user = db_container.coda_labels[ValidLabels.database_username].val
......
#!/usr/bin/env python3
from email.policy import default
from pydoc import describe
import click
import os
......@@ -73,7 +74,7 @@ def backup_cli(debug):
"-m",
default="Docker",
prompt="How is your DB running?",
help="Environment the database is running in: 'Docker', 'kubernetes'",
help="Environment the database is running in: 'docker', 'kubernetes'",
type=click.Choice(["docker", "kubernetes"], case_sensitive=False),
)
@click.option(
......@@ -87,8 +88,8 @@ def backup_cli(debug):
"-t",
default="mysql",
prompt="Database type?",
help="Do we backup a mysql/maria db or a postgres db?",
type=click.Choice(["mysql", "postgres"], case_sensitive=False),
help="Do we backup a mysql/maria/neo4j db or a postgres db?",
type=click.Choice(["mysql", "postgres", "neo4j"], case_sensitive=False),
)
@click.option(
"--database-host",
......@@ -127,23 +128,43 @@ def backup_now(
database_password,
database_names,
):
"""Wizard to backup a specific database, now!"""
"""Wizard to backup a specific database running in a container, now!"""
container_names = container_identifier.split(",")
for container_name in container_names:
backup_name = container_name
namespace = None
if mode == "kubernetes":
namespace, container_name_ = container_name.split("/")
container_name = container_name_
# You are here. You need to find the workload by name and then find the pod in the workload. Maybe we need some more container helper funcs
container_name = next(
p
for p in ContainerHelper.kubernetes_get_pods(
namespace=namespace, describe=True
# in kubernetes mode, we expect the static workload name. Now we need to query the running pod in this workload, which is the actual container running the database
if not "/" in container_name:
raise ValueError(
f"Expected '--container-identifier' in format 'namespace/workload-name' got '{container_name}'"
)
).name
print(container_name)
exit()
namespace, workload_name = container_name.split("/")
# You are here. You need to find the workload by name and then find the pod in the workload. Maybe we need some more container helper funcs
workload = next(
(
wl
for wl in ContainerHelper.kubernetes_get_workloads(
namespace=namespace, describe=True
)
if wl["metadata"]["name"] == workload_name
),
None,
)
if workload is None:
raise ValueError(
f"Can not find workload with name '{workload_name}' in namespace '{namespace}'. Cancel backup."
)
container = ContainerHelper.kubernetes_get_pods_by_workload(workload)
if len(container) != 1:
raise ValueError(
f"Workload '{workload['metadata']['namespace']}/{workload['metadata']['name']}' containers multiple pods. Dont know what to do..."
)
backup_name = workload_name
container_name = container[0].name
container = Container(
mode=mode,
......@@ -152,7 +173,7 @@ def backup_now(
backup_name=container_name,
coda_labels=ValidLabels.valid_labels_from_dict(
{
ValidLabels.backup_name.key: container_name,
ValidLabels.backup_name.key: backup_name,
ValidLabels.database_type.key: database_type,
ValidLabels.database_host.key: database_host,
ValidLabels.database_password.key: database_password,
......@@ -207,16 +228,8 @@ def backup_kubernetes(namespace, all_namespaces, target_dir):
BackupperClass = database_type_backupper_mapping[
pod.coda_labels[ValidLabels.database_type].val
]
bu = BackupperClass(pod)
bu.manager.retention_duration = {
RetentionType.DAILY: pod.coda_labels[ValidLabels.retention_daily].val,
RetentionType.WEEKLY: pod.coda_labels[ValidLabels.retention_weekly].val,
RetentionType.MONTHLY: pod.coda_labels[
ValidLabels.retention_monthly
].val,
RetentionType.YEARLY: pod.coda_labels[ValidLabels.retention_yearly].val,
RetentionType.MANUAL: pod.coda_labels[ValidLabels.retention_manual].val,
}
bu: BaseBackupper = BackupperClass(pod, target_dir)
if not pod.coda_labels[ValidLabels.database_names].val:
databases = None
else:
......@@ -251,42 +264,10 @@ def backup_docker(target_dir):
BackupperClass = database_type_backupper_mapping[
container.coda_labels[ValidLabels.database_type].val
]
bu = BackupperClass(
mode="docker",
container_name=container.name,
host=container.coda_labels[ValidLabels.database_host].val,
user=container.coda_labels[ValidLabels.database_username].val,
password=container.coda_labels[ValidLabels.database_password].val,
backups_base_path=Path(target_dir)
if target_dir
else container.coda_labels[ValidLabels.backup_dir].val,
)
bu.manager.retention_duration = {
RetentionType.DAILY: container.coda_labels[
ValidLabels.retention_daily
].val,
RetentionType.WEEKLY: container.coda_labels[
ValidLabels.retention_weekly
].val,
RetentionType.MONTHLY: container.coda_labels[
ValidLabels.retention_monthly
].val,
RetentionType.YEARLY: container.coda_labels[
ValidLabels.retention_yearly
].val,
RetentionType.MANUAL: container.coda_labels[
ValidLabels.retention_manual
].val,
}
if not container.coda_labels[ValidLabels.database_names].val:
databases = None
else:
databases = container.coda_labels[ValidLabels.database_names].val.split(
","
)
bu: BaseBackupper = BackupperClass(container, target_base_dir=target_dir)
bu.backup(
databases=databases,
databases=container.coda_labels[ValidLabels.database_names].val,
retention_type=RetentionType.DAILY,
)
bu.manager.rotate_existing_backups()
......@@ -349,8 +330,8 @@ def restore_cli(debug):
help="The Databases to backup. We can select a specific database by name . e.g. 'my_database01' or a list of databases e.b. ['mydb01', 'mydb02']. If left empty all databses accessable for the user will be backuped.",
)
@click.option(
"--backup-name",
prompt="Name of the backup?",
"--backup-path",
prompt="Which back to restore?",
help="Name of the backup as listed in `coda-restore <mode> list <database instance name>`?",
)
def restore_now(
......@@ -361,7 +342,7 @@ def restore_now(
database_user,
database_password,
database_name,
backup_name,
backup_path,
):
raise NotImplementedError
......
......@@ -143,40 +143,55 @@ class ContainerHelper:
)
pods: List[Container] = []
for workload in workloads:
if (
"spec" in workload
and "selector" in workload["spec"]
and "matchLabels" in workload["spec"]["selector"]
):
# the Workload specs.selector.matchLabels defines how pods running under this workload have to be labeled
# this provides us the information to find pods belonging to a/this specific workload
pod_selector_labels: List[Label] = Label.from_dict(
workload["spec"]["selector"]["matchLabels"]
for wl_pod in cls.kubernetes_get_pods_by_workload(workload):
# we merge the workload's backup config labels into the pod labels. Usually only the kubernetes workload will have the backup config labels
workload_backup_config_labels = ValidLabels.valid_labels_from_dict(
workload["metadata"]["labels"], add_missing_default_labels=True
)
for wl_pod in cls.kubernetes_get_pods(
labels=pod_selector_labels,
namespace=namespace,
all_namespaces=all_namespaces,
describe=True,
wl_pod.coda_labels = wl_pod.coda_labels | workload_backup_config_labels
if (
ValidLabels.backup_name in wl_pod.coda_labels
and not wl_pod.coda_labels[ValidLabels.backup_name].val
):
# we merge the workload's backup config labels into the pod labels. Usually only the kubernetes workload will have the backup config labels
workload_backup_config_labels = ValidLabels.valid_labels_from_dict(
workload["metadata"]["labels"], add_missing_default_labels=True
)
wl_pod.coda_labels = (
wl_pod.coda_labels | workload_backup_config_labels
)
# if the backup subdir is not defined by a label we override the default (pod name) with the parent workload name to get a more consistent name
wl_pod.backup_name = workload["metadata"]["name"]
pods.append(wl_pod)
return pods
@classmethod
def kubernetes_get_pods_by_workload(cls, workload: Dict):
"""[summary]
if (
ValidLabels.backup_name in wl_pod.coda_labels
and not wl_pod.coda_labels[ValidLabels.backup_name].val
):
# if the backup subdir is not defined by a label we override the default (pod name) with the parent workload name to get a more consistent name
wl_pod.backup_name = workload["metadata"]["name"]
Args:
workload (dict): workload description as output by `kubectl get ... -o json` e.g. `kubectl get deployment --all-namespaces --selector backup.dzd-ev.de/enabled=true -o json`
# Attach the parent workload data just for good measure... not in any use yet. maybe we can delete this step
wl_pod.parent = workload
pods.append(wl_pod)
Raises:
NotImplementedError: [description]
"""
pods: List[Container] = []
if not (
"spec" in workload
and "selector" in workload["spec"]
and "matchLabels" in workload["spec"]["selector"]
and "namespace" in workload["metadata"]
):
raise NotImplementedError(
f"Can not query pods of workload. Unknown workload format. Expected to find keys 'spec.selector.matchLabels' in Workload. got \n\n{workload}\n\n"
)
pod_selector_labels: List[Label] = Label.from_dict(
workload["spec"]["selector"]["matchLabels"]
)
for wl_pod in cls.kubernetes_get_pods(
labels=pod_selector_labels,
namespace=workload["metadata"]["namespace"],
all_namespaces=False,
describe=True,
):
# Attach the parent workload data just for good measure... not in any use yet. maybe we can delete this step
wl_pod.parent = workload
pods.append(wl_pod)
return pods
@classmethod
......@@ -278,7 +293,7 @@ class ContainerHelper:
return containers
@classmethod
def kubernetes_get_config_by_labels(cls, pod_name: str) -> Dict[Label, Label]:
def kubernetes_get_config_by_labels(cls, pod_name: str) -> Dict[str, Label]:
# config.DATABASE_CONTAINER_LABEL_BASE_KEY
container_labels: Dict = json.loads(
Executer.exec(
......@@ -295,7 +310,7 @@ class ContainerHelper:
return config_labels_dict
@classmethod
def docker_get_labels(cls, container_name: str, valid=True) -> Dict[Label, Label]:
def docker_get_labels(cls, container_name: str, valid=True) -> Dict[str, Label]:
# config.DATABASE_CONTAINER_LABEL_BASE_KEY
container_labels: Dict = json.loads(
......
......@@ -183,30 +183,6 @@ class ValidLabels:
if not a.startswith("__") and not callable(getattr(cls, a))
]
"""CAN BE REMOVED
@classmethod
def LabelFromString(cls, label_string: str) -> Label:
l = Label(label=label_string)
if cls.check_label_is_valid(l):
for validlabel in cls.iter():
if validlabel.key == l.key:
l.type = validlabel.type
l.possible_values = validlabel.possible_values
"""
"""CAN BE REMOVED
@classmethod
def check_label_is_valid(cls, label: Label) -> bool:
for validlabel in cls.iter():
if validlabel.key == label.key:
if not label.val or (
label.val
and validlabel.possible_values
and label.val in validlabel.possible_values
):
return True
"""
@classmethod
def label_from_valid_label_as_template(cls, valid_label: Label, val):
return Label(
......@@ -221,7 +197,7 @@ class ValidLabels:
@classmethod
def valid_labels_from_dict(
cls, labels: Union[List[dict], dict], add_missing_default_labels: bool = False
) -> Dict[Label, Label]:
) -> Dict[str, Label]:
if isinstance(labels, list):
# convert list of labels to one dict representing all labels
labels_as_dict = {}
......@@ -232,7 +208,7 @@ class ValidLabels:
for valid_label in cls.iter():
for key, val in labels.items():
if valid_label.key == key:
lbls[valid_label] = cls.label_from_valid_label_as_template(
lbls[valid_label.key] = cls.label_from_valid_label_as_template(
valid_label=valid_label, val=val
)
break
......@@ -245,7 +221,7 @@ class ValidLabels:
@classmethod
def non_valid_labels_from_dict(
cls, labels: Union[List[dict], dict]
) -> Dict[Label, Label]:
) -> Dict[str, Label]:
if isinstance(labels, list):
# convert list of labels to one dict representing all labels
labels_as_dict = {}
......@@ -254,7 +230,7 @@ class ValidLabels:
labels = labels_as_dict
lbls = {}
for key, val in labels.items():
if key not in [valid_label.key for valid_label in ValidLabels.iter()]:
if key not in [valid_label for valid_label in ValidLabels.iter()]:
label = Label(label={key: val})
lbls[label.key] = label
return lbls
......
......@@ -83,12 +83,13 @@ Thats it. We now have a directory `./backups/` in front of us, with all database
* (WIP) Restore Wizard
* (WIP) Neo4j support
* (WIP) Database auth via Docker/kubernetes Secrets
* (Planned) Database auth via Docker/kubernetes Secrets
* (Planned) Support pods with more than one container https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/#opening-a-shell-when-a-pod-has-more-than-one-container
* (Planned) Create database if not existent via label conf (checked/executed when pod starts via https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/)
* (Planned) Email notification
* (Planned) Support of docker and kubernetes secrets for providing database auth
* (Planned) Docker Event listener / Kubectl hooks to react to container started/stopped
* (Idea) Database tools (Create non existing databases, based on labels)
* (Idea) restore by label (checked/executed when pod starts via https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/)
* (Idea) Matrix notifications
* (Idea) [Podman](https://podman.io/) support. Your help is greatly appreciated. Should be easy or maybe no work at all
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment