style: auto Format files with Prettier

This commit is contained in:
Alex Wellnitz 2023-09-04 11:41:11 +02:00
parent 0377f5a73e
commit 8266430503
13 changed files with 187 additions and 142 deletions

View File

@ -1,10 +1,10 @@
name: Docker Image CI
on:
push:
tags:
- '*'
workflow_dispatch:
push:
tags:
- "*"
workflow_dispatch:
env:
CARGO_TERM_COLOR: always
@ -13,26 +13,26 @@ jobs:
build-container:
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Log in to Docker Hub
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: alexohneander/alexohneander-astro
- name: Build and push Docker image
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
with:
context: .
file: ./Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
- name: Check out the repo
uses: actions/checkout@v3
- name: Log in to Docker Hub
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: alexohneander/alexohneander-astro
- name: Build and push Docker image
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
with:
context: .
file: ./Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

View File

@ -69,7 +69,10 @@ const { activeNav } = Astro.props;
</a>
</li>
<li>
<a href="/experience" class={activeNav === "experience" ? "active" : ""}>
<a
href="/experience"
class={activeNav === "experience" ? "active" : ""}
>
Experience
</a>
</li>

View File

@ -8,7 +8,14 @@ export interface Props {
newTarget?: boolean;
}
const { href, className, ariaLabel, title, disabled = false, newTarget = true } = Astro.props;
const {
href,
className,
ariaLabel,
title,
disabled = false,
newTarget = true,
} = Astro.props;
---
<a
@ -18,7 +25,7 @@ const { href, className, ariaLabel, title, disabled = false, newTarget = true }
aria-label={ariaLabel}
title={title}
aria-disabled={disabled}
target={ newTarget ? "_blank" : "_self"}
target={newTarget ? "_blank" : "_self"}
>
<slot />
</a>

View File

@ -12,8 +12,7 @@ tags:
- bash
- backup
ogImage: ""
description:
In this post, we will show you how to create a MySQL server backup using Kubernetes CronJobs.
description: In this post, we will show you how to create a MySQL server backup using Kubernetes CronJobs.
---
In this post, we will show you how to create a MySQL server backup using Kubernetes CronJobs.
@ -22,13 +21,14 @@ In our case, we do not have a managed MySQL server. But we want to backup it to
For this we first build a container that can execute our tasks, because we will certainly need several tasks to back up our cluster.
## CronJob Agent Container
First, we'll show you our Dockerfile so you know what we need.
```Dockerfile
FROM alpine:3.10
# Update
RUN apk --update add --no-cache bash nodejs-current yarn curl busybox-extras vim rsync git mysql-client openssh-client
RUN apk --update add --no-cache bash nodejs-current yarn curl busybox-extras vim rsync git mysql-client openssh-client
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl && chmod +x ./kubectl && mv ./kubectl /usr/local/bin/kubectl
# Scripts
@ -41,6 +41,7 @@ RUN mkdir /var/backup/mysql
```
## Backup Script
And now our backup script which the container executes.
Our script is quite simple: we fetch all tables with the mysql client, export them as SQL files, pack them into a compressed archive, and send it to our NAS at an 8-hour interval.
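The script itself falls between the hunks shown here, so as a rough orientation, here is a minimal sketch of the flow just described. It is an assumption-laden sketch, not the post's actual script: the `MYSQL_HOST` and `MYSQL_PASSWORD` variables, the NAS host name, and the directory layout are placeholders; only the final rsync line mirrors the one visible below.
```bash
#!/bin/sh
# Sketch only: placeholder hosts, credentials, and paths.
BACKUPDIR=/var/backup/mysql
DUMPDIR="$BACKUPDIR/dumps"
BACKUPSERVER=nas.example.com     # hypothetical NAS host
BACKUPREMOTEDIR=/volume1/backup  # hypothetical target directory
NOW=$(date +%Y-%m-%d_%H-%M)

mkdir -p "$DUMPDIR"

# Export every database the mysql client can see as its own SQL file
# (system schemas are not filtered out in this sketch).
for db in $(mysql -h "$MYSQL_HOST" -u root -p"$MYSQL_PASSWORD" -N -e 'SHOW DATABASES'); do
  mysqldump -h "$MYSQL_HOST" -u root -p"$MYSQL_PASSWORD" "$db" > "$DUMPDIR/$db.sql"
done

# Pack all dumps into one compressed archive and ship it to the NAS.
tar -czf "$BACKUPDIR/backup-${NOW}.tar.gz" -C "$DUMPDIR" .
rsync -avz "$BACKUPDIR/backup-${NOW}.tar.gz" "root@$BACKUPSERVER:$BACKUPREMOTEDIR"
```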
@ -89,6 +90,7 @@ rsync -avz $BACKUPDIR/backup-${NOW}.tar.gz root@$BACKUPSERVER:$BACKUPREMOTEDIR
```
## Kubernetes CronJob Deployment
Finally, we show you the Kubernetes deployment for our agent.
In the deployment, our agent is defined as a CronJob that runs every 8 hours.
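The hunk below only shows the container part of the manifest; the `schedule` field sits in the elided header. As a hedged, imperative equivalent, such a CronJob could also be created like this (the cron expression is an assumption based on the 8-hour interval; the image and command are the ones visible in the manifest):
```bash
# "0 */8 * * *" fires at 00:00, 08:00 and 16:00; the schedule is assumed,
# since the diff elides that part of the manifest.
kubectl create cronjob cronjob-agent \
  --image=xxx/cronjob-agent \
  --schedule='0 */8 * * *' \
  -- bash /srv/jobs/backup-mariadb.sh
```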
@ -111,7 +113,7 @@ spec:
containers:
- name: cronjob-agent
image: xxx/cronjob-agent
command: ["bash", "/srv/jobs/backup-mariadb.sh"]
command: ["bash", "/srv/jobs/backup-mariadb.sh"]
volumeMounts:
- mountPath: /root/.ssh/id_rsa.pub
name: cronjob-default-config
@ -129,4 +131,4 @@ spec:
name: cronjob-default-config
defaultMode: 256
restartPolicy: Never
```
```

View File

@ -10,13 +10,13 @@ tags:
- cni
- baremetal
ogImage: ""
description:
In a freshly set up Kubernetes cluster, we need a so-called CNI. This CNI is not always present after installation.
description: In a freshly set up Kubernetes cluster, we need a so-called CNI. This CNI is not always present after installation.
---
In a freshly set up Kubernetes cluster, we need a so-called CNI. This CNI is not always present after installation.
## What is a Container Network Interface (CNI)?
CNI is a network framework that allows the dynamic configuration of networking resources through a group of specifications and libraries written in Go. The plugin specification outlines an interface that configures the network, provisions IP addresses, and maintains multi-host connectivity.
In the Kubernetes context, the CNI seamlessly integrates with the kubelet to allow automatic network configuration between pods using an underlay or overlay network. An underlay network is defined at the physical level of the networking layer composed of routers and switches. In contrast, the overlay network uses a virtual interface like VxLAN to encapsulate the network traffic.
@ -26,6 +26,7 @@ Once the network configuration type is specified, the runtime defines a network
In addition to Kubernetes networking, CNI also supports Kubernetes-based platforms like OpenShift, providing unified container communication across the cluster through a software-defined networking (SDN) approach.
### What is Cilium?
Cilium is an open-source, highly scalable Kubernetes CNI solution developed by Linux kernel developers. Cilium secures network connectivity between Kubernetes services by adding high-level application rules, utilizing eBPF filtering technology. Cilium is deployed as a daemon, `cilium-agent`, on each node of the Kubernetes cluster; it manages operations and translates the network definitions into eBPF programs.
Communication between pods happens either over an overlay network or via a routing protocol; both IPv4 and IPv6 addresses are supported in either case. The overlay implementation uses VXLAN tunneling for packet encapsulation, while native routing happens unencapsulated via the BGP protocol.
@ -37,10 +38,12 @@ Its network and application layer awareness manages packet inspection, and the a
Cilium also has support for Kubernetes Network Policies through HTTP request filters. The policy configuration can be written in a YAML or JSON file and offers both ingress and egress enforcement. Admins can accept or reject requests based on the request method or path header while integrating policies with a service mesh like Istio.
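As a hedged illustration of such an L7 rule (the policy name, labels, and path below are invented for the example, not taken from a real cluster), a policy that only allows `GET /public` from one app to another could look like this:
```bash
kubectl apply -f - <<'EOF'
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: allow-get-public   # illustrative name
spec:
  endpointSelector:
    matchLabels:
      app: backend         # illustrative labels
  ingress:
    - fromEndpoints:
        - matchLabels:
            app: frontend
      toPorts:
        - ports:
            - port: "80"
              protocol: TCP
          rules:
            http:
              - method: "GET"
                path: "/public"
EOF
```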
### Preparation
For the installation we need the CLI from Cilium.
We can install this with the following commands:
**macOS**
```bash
curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/latest/download/cilium-darwin-amd64.tar.gz{,.sha256sum}
shasum -a 256 -c cilium-darwin-amd64.tar.gz.sha256sum
@ -49,6 +52,7 @@ rm cilium-darwin-amd64.tar.gz{,.sha256sum}
```
**Linux**
```bash
curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/latest/download/cilium-linux-amd64.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-amd64.tar.gz.sha256sum
@ -57,14 +61,18 @@ rm cilium-linux-amd64.tar.gz{,.sha256sum}
```
### Install Cilium
You can install Cilium on any Kubernetes cluster. These are the generic instructions on how to install Cilium into any Kubernetes cluster. The installer will attempt to automatically pick the best configuration options for you.
#### Requirements
- Kubernetes must be configured to use CNI
- Linux kernel >= 4.9.17
#### Install
Install Cilium into the Kubernetes cluster pointed to by your current kubectl context:
```bash
cilium install
```
@ -72,7 +80,9 @@ cilium install
If the installation fails for some reason, run `cilium status` to retrieve the overall status of the Cilium deployment and inspect the logs of whatever pods are failing to be deployed.
### Validate the Installation
To validate that Cilium has been properly installed, you can run:
```bash
$ cilium status --wait
/¯¯\
@ -91,6 +101,7 @@ Image versions cilium quay.io/cilium/cilium:v1.9.5: 2
```
Run the following command to validate that your cluster has proper network connectivity:
```bash
$ cilium connectivity test
Monitor aggregation detected, will skip some flow validation steps
@ -101,4 +112,5 @@ $ cilium connectivity test
---------------------------------------------------------------------------------------------------------------------
✅ 69/69 tests successful (0 warnings)
```
Congratulations! You have a fully functional Kubernetes cluster with Cilium. 🎉
Congratulations! You have a fully functional Kubernetes cluster with Cilium. 🎉

View File

@ -10,8 +10,7 @@ tags:
- openvpn
- google
ogImage: ""
description:
In this tutorial I will try to explain, briefly and concisely, how you can set up a site-to-site VPN for the Google Cloud Network.
description: In this tutorial I will try to explain, briefly and concisely, how you can set up a site-to-site VPN for the Google Cloud Network.
---
In this tutorial I will try to explain, briefly and concisely, how you can set up a site-to-site VPN for the Google Cloud Network.
@ -23,13 +22,17 @@ We need 2 virtual machines. The first one on the side of our office and the othe
#### Setup OpenVPN Clients
##### Site-to-Site Client Office Side
We need to install OpenVPN, which we do as follows:
```bash
apt install openvpn -y
```
After that we add our OpenVPN configuration under this path `/etc/openvpn/s2s.conf`.
*s2s.conf*
_s2s.conf_
```
# Use a dynamic tun device.
# For Linux 2.2 or non-Linux OSes,
@ -40,14 +43,14 @@ After that we add our OpenVPN configuration under this path `/etc/openvpn/s2s.co
dev tun
# Our OpenVPN peer is the Google gateway.
remote IP_GOOGLE_VPN_CLIENT
remote IP_GOOGLE_VPN_CLIENT
ifconfig 4.1.0.2 4.1.0.1
route 10.156.0.0 255.255.240.0 # Google Cloud VM Network
route 10.24.0.0 255.252.0.0 # Google Kubernetes Pod Network
push "route 192.168.10.0 255.255.255.0" # Office Network
push "route 192.168.10.0 255.255.255.0" # Office Network
# Our pre-shared static key
#secret static.key
@ -78,11 +81,15 @@ verb 3
log /etc/openvpn/s2s.log
```
We also have to enable the IPv4 forwarding function in the kernel, so we go to `/etc/sysctl.conf` and uncomment the following line:
```
net.ipv4.ip_forward=1
```
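To apply the change without rebooting, you can reload the file directly (a small convenience step, not from the original post):
```bash
# Reload /etc/sysctl.conf so the forwarding flag takes effect immediately.
sysctl -p
```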
We can then start our OpenVPN client with this command:
```bash
systemctl start openvpn@s2s
```
@ -90,11 +97,13 @@ systemctl start openvpn@s2s
On the Office side we have to open the port for the OpenVPN client so that the other side can connect.
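The configs above do not pin a port, so assuming OpenVPN's default of 1194/udp, opening it could look like this:
```bash
# 1194/udp is OpenVPN's default; adjust if your s2s.conf sets another
# port or protocol.
iptables -A INPUT -p udp --dport 1194 -j ACCEPT
```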
##### Site-to-Site Client Google Side
When setting up the OpenVPN client on Google's side, there is one setting to watch out for: when we create the machine, we need to enable this option in the network settings:
![Google Cloud Network Settings](https://i.imgur.com/OXEkhxo.png)
Also on this side we have to install the OpenVPN client again and then add this config under the path `/etc/openvpn/s2s.conf`:
```
# Use a dynamic tun device.
# For Linux 2.2 or non-Linux OSes,
@ -105,7 +114,7 @@ Also on this side we have to install the OpenVPN client again and then add this
dev tun
# Our OpenVPN peer is the Office gateway.
remote IP_OFFICE_VPN_CLIENT
remote IP_OFFICE_VPN_CLIENT
ifconfig 4.1.0.2 4.1.0.1
@ -143,12 +152,15 @@ verb 3
log /etc/openvpn/s2s.log
```
We also have to enable the IPv4 forwarding function in the kernel, so we go to `/etc/sysctl.conf` and uncomment the following line:
```
net.ipv4.ip_forward=1
```
##### Connection test
Now that both clients are configured, we can test the connection. Both clients have to be started with systemctl. After that we watch the logs with `tail -f /etc/openvpn/s2s.log` and wait for this message:
```
@ -167,6 +179,7 @@ Wed May 5 08:28:12 2021 Initialization Sequence Completed
If we can't establish a connection, we need to check whether the ports are open on both sides.
#### Routing Google Cloud Network
After our clients are installed and configured, we need to set the routes on Google. I will not cover the Office side, as it is always different, but you have to add routes to the Google networks there as well.
To set the route on Google we go to the network settings and then to Routes. Here you have to specify your office network so that the clients in the Google network know what to do.
@ -174,6 +187,7 @@ To set the route on Google we go to the network settings and then to Routes. Her
![Google Cloud Network Route](https://i.imgur.com/6Q2Drf4.png)
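If you prefer the CLI over the console, a hedged `gcloud` equivalent looks like this (the route and instance names and the zone are placeholders; the destination range is the office network from the config above):
```bash
gcloud compute routes create office-network \
  --network=default \
  --destination-range=192.168.10.0/24 \
  --next-hop-instance=openvpn-client \
  --next-hop-instance-zone=europe-west3-a   # placeholder zone
```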
#### IP-Masquerade-Agent
IP masquerading is a form of network address translation (NAT) used to perform many-to-one IP address translations, which allows multiple clients to access a destination using a single IP address. A GKE cluster uses IP masquerading so that destinations outside of the cluster only receive packets from node IP addresses instead of Pod IP addresses. This is useful in environments that expect to only receive packets from node IP addresses.
You have to edit the ip-masq-agent; this configuration is responsible for letting the pods inside the nodes reach other parts of the GCP VPC network, more specifically the VPN. It allows pods to communicate with the devices that are accessible through the VPN.
@ -182,11 +196,12 @@ First of all we're gonna be working inside the kube-system namespace, and we're
```yaml
nonMasqueradeCIDRs:
- 10.24.0.0/14 # The IPv4 CIDR the cluster is using for Pods (required)
- 10.24.0.0/14 # The IPv4 CIDR the cluster is using for Pods (required)
- 10.156.0.0/20 # The IPv4 CIDR of the subnetwork the cluster is using for Nodes (optional; it works without, but I guess it's better with it)
masqLinkLocal: false
resyncInterval: 60s
```
and run `kubectl create configmap ip-masq-agent --from-file config --namespace kube-system`.
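Note that `--from-file config` expects a local file literally named `config`, because the file name becomes the ConfigMap key that the DaemonSet below reads. A short end-to-end sketch:
```bash
# The file name becomes the ConfigMap key; the DaemonSet below expects
# the key to be called "config".
cat > config <<'EOF'
nonMasqueradeCIDRs:
  - 10.24.0.0/14
  - 10.156.0.0/20
masqLinkLocal: false
resyncInterval: 60s
EOF
kubectl create configmap ip-masq-agent --from-file config --namespace kube-system
```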
Afterwards, to configure the ip-masq-agent, put this in an `ip-masq-agent.yml` file:
@ -205,17 +220,17 @@ spec:
spec:
hostNetwork: true
containers:
- name: ip-masq-agent
image: gcr.io/google-containers/ip-masq-agent-amd64:v2.4.1
args:
- name: ip-masq-agent
image: gcr.io/google-containers/ip-masq-agent-amd64:v2.4.1
args:
- --masq-chain=IP-MASQ
# To non-masquerade reserved IP ranges by default, uncomment the line below.
# - --nomasq-all-reserved-ranges
securityContext:
privileged: true
volumeMounts:
- name: config
mountPath: /etc/config
securityContext:
privileged: true
volumeMounts:
- name: config
mountPath: /etc/config
volumes:
- name: config
configMap:
@ -227,14 +242,14 @@ spec:
- key: config
path: ip-masq-agent
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: "CriticalAddonsOnly"
operator: "Exists"
```
and run `kubectl -n kube-system apply -f ip-masq-agent.yml`.
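To check that the agent actually came up (it runs as a DaemonSet, so one pod per node is expected), a quick sanity check, not from the original post:
```bash
kubectl -n kube-system get pods -o wide | grep ip-masq-agent
```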
Now our site-to-site VPN should be set up. You should now test if you can ping the pods and if all other services work as you expect them to.
Now our site-to-site VPN should be set up. You should now test if you can ping the pods and if all other services work as you expect them to.

View File

@ -10,9 +10,8 @@ tags:
- network
- haproxy
ogImage: ""
description:
To briefly explain the situation:
We have a **HAProxy** running on a Debian server as a Docker container. This is the entrance node to a **Docker Swarm** cluster.
description: To briefly explain the situation:
We have a **HAProxy** running on a Debian server as a Docker container. This is the entrance node to a **Docker Swarm** cluster.
---
To briefly explain the situation:
@ -40,4 +39,4 @@ With this setup, we get overhead into the system that we don't need. We have an
If we use a lot of microservices, it is important that we use something like Docker, because then we can share the kernel and it makes deployment much easier.
But if we have only one application that is very important, it is better to keep it simple.
But if we have only one application that is very important, it is better to keep it simple.

View File

@ -10,8 +10,7 @@ tags:
- linux
- backup
ogImage: ""
description:
Since we all know that the first rule is "no backup, no pity", I'll show you how you can use Borg to back up your important data in an encrypted way with relative ease.
description: Since we all know that the first rule is "no backup, no pity", I'll show you how you can use Borg to back up your important data in an encrypted way with relative ease.
---
Since we all know that the first rule is "no backup, no pity", I'll show you how you can use Borg to back up your important data in an encrypted way with relative ease.
@ -19,14 +18,17 @@ Since we all know that the first rule is "no backup, no pity", I'll show you how
If you do not want to use a second computer, but an external hard drive, you can adjust this later in the script and ignore the points in the instructions for the second computer.
### Requirements
- 2 Linux Computers
- Borg
- SSH
- Storage
- More than 5 brain cells
- 2 Linux Computers
- Borg
- SSH
- Storage
- More than 5 brain cells
### Installation
First we need to install Borg on both computers so that we can back up on one and store the backups on the other.
```bash
sudo apt install borgbackup
```
@ -34,11 +36,13 @@ sudo apt install borgbackup
Then we create a Borg repository. We can either use an external target or a local path.
**External Target:**
```bash
borg init --encryption=repokey ssh://user@192.168.2.42:22/mnt/backup/borg
```
**Local Path:**
```bash
borg init --encryption=repokey /path/to/backup_folder
```
@ -49,6 +53,7 @@ This way you don't have to enter a password and is simply nicer from my point of
Once you have created everything and prepared the script with your parameters, I recommend running the script as a cron job so that you no longer have to remember to back up your things yourself.
**crontab example:**
```bash
#Minute Hour Day Month Day(Week) command
#(0-59) (0-23) (1-31) (1-12) (1-7;1=Mo)
@ -56,6 +61,7 @@ Once you have created everything and prepared the script with your parameters, I
```
### Automated script
```bash
#!/bin/sh
@ -113,6 +119,7 @@ exit ${global_exit}
```
### Get your data from the backup
First, we create a temporary directory in which we can mount the backup.
```bash
@ -125,7 +132,9 @@ At this point you must remember that you can use an external destination or a lo
```bash
borg mount ssh://user@192.168.2.42/mnt/backup/borg /tmp/borg-backup
```
Once our repo is mounted, we can change into the directory and restore files via **rsync** or **cp**.
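A hedged example of pulling data back out; the archive name `my-archive` and the paths are illustrative:
```bash
# Archives show up as directories under the mount point.
ls /tmp/borg-backup
# Copy a directory from the (illustrative) archive back into place.
rsync -av /tmp/borg-backup/my-archive/home/user/Documents/ /home/user/Documents/
# Unmount the repository when done.
borg umount /tmp/borg-backup
```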
### Conclusion
I hope you could follow everything and will now back up your shit sensibly. Because without a backup we are all lost!
I hope you could follow everything and will now back up your shit sensibly. Because without a backup we are all lost!

View File

@ -13,7 +13,8 @@ export interface Props {
const { post } = Astro.props;
const { title, author, description, ogImage, canonicalURL, pubDatetime, tags } = post.data;
const { title, author, description, ogImage, canonicalURL, pubDatetime, tags } =
post.data;
const { Content } = await post.render();
@ -21,7 +22,13 @@ const ogUrl = new URL(ogImage ? ogImage : `${title}.png`, Astro.url.origin)
.href;
---
<Layout title={title} author={author} description={description} ogImage={ogUrl} canonicalURL={canonicalURL}>
<Layout
title={title}
author={author}
description={description}
ogImage={ogUrl}
canonicalURL={canonicalURL}
>
<Header />
<div class="mx-auto flex w-full max-w-3xl justify-start px-2">
<button

View File

@ -4,62 +4,68 @@ title: "Experience"
---
### DevOps Engineer, Materna SE
**since 2023**
As a key globally active IT service provider, Materna advises and assists you in all aspects of digitization and provides tailor-made technologies for agile, flexible, and secure IT.
- **Infrastructure as Code (IaC)**:
- Develop and maintain infrastructure as code scripts using tools like Terraform, Ansible, or CloudFormation to automate the provisioning of infrastructure resources.
- **Continuous Integration (CI) and Continuous Deployment (CD)**:
- Implement and manage CI/CD pipelines using tools like Jenkins, Travis CI, or GitLab CI to automate the software delivery process.
- **Containerization and Orchestration**:
- Work with Docker containers and container orchestration platforms like Kubernetes to improve scalability and resource utilization.
- **Monitoring and Logging**:
- Set up monitoring and logging solutions (e.g., Prometheus, ELK Stack) to track application performance, identify issues, and troubleshoot problems proactively.
- **Collaboration and Communication**:
- Foster collaboration between development and operations teams, ensuring effective communication and knowledge sharing.
- **Infrastructure Optimization**:
- Analyze and optimize infrastructure costs, resource utilization, and performance to achieve cost-efficiency and scalability.
- **Troubleshooting and Support**:
- Respond to incidents, diagnose problems, and provide support to ensure system reliability and availability.
- **Infrastructure as Code (IaC)**:
- Develop and maintain infrastructure as code scripts using tools like Terraform, Ansible, or CloudFormation to automate the provisioning of infrastructure resources.
- **Continuous Integration (CI) and Continuous Deployment (CD)**:
- Implement and manage CI/CD pipelines using tools like Jenkins, Travis CI, or GitLab CI to automate the software delivery process.
- **Containerization and Orchestration**:
- Work with Docker containers and container orchestration platforms like Kubernetes to improve scalability and resource utilization.
- **Monitoring and Logging**:
- Set up monitoring and logging solutions (e.g., Prometheus, ELK Stack) to track application performance, identify issues, and troubleshoot problems proactively.
- **Collaboration and Communication**:
- Foster collaboration between development and operations teams, ensuring effective communication and knowledge sharing.
- **Infrastructure Optimization**:
- Analyze and optimize infrastructure costs, resource utilization, and performance to achieve cost-efficiency and scalability.
- **Troubleshooting and Support**:
- Respond to incidents, diagnose problems, and provide support to ensure system reliability and availability.
### DevOps Engineer, Apozin GmbH
**until 2023**
Apozin turns visions into a competitive advantage. Our team of pharmacists, PTAs, graphic designers, web designers, sales professionals, marketing specialists, and programmers realizes holistic concepts that we constantly evolve and improve for our clients.
- Operation and design of Kubernetes clusters at multiple locations
- Design and implementation of backup strategies
- Deployment of various services (including HAProxy, MariaDB, MongoDB, Elasticsearch, NGINX)
- Design and operation of comprehensive monitoring solutions (Zabbix, Grafana, Prometheus, Graylog)
- Design and setup of build pipelines with Jenkins, Docker, and FluxCD
- Administration of various servers in different environments (Google Cloud, Hetzner, AWS, Digital Ocean, Hosting.de)
- Operation and design of Kubernetes clusters at multiple locations
- Design and implementation of backup strategies
- Deployment of various services (including HAProxy, MariaDB, MongoDB, Elasticsearch, NGINX)
- Design and operation of comprehensive monitoring solutions (Zabbix, Grafana, Prometheus, Graylog)
- Design and setup of build pipelines with Jenkins, Docker, and FluxCD
- Administration of various servers in different environments (Google Cloud, Hetzner, AWS, Digital Ocean, Hosting.de)
### Fullstack .Net Developer, prointernet
**until 2019**
Agency for internet and design founded in 1998, established in Kastellaun in the Hunsrück region, operating worldwide, and at home on the internet. A team of designers, developers, and consultants who love what they do.
- Development of web applications (C#, Dotnet, JS)
- Design of websites (Composite C1)
- Company Website
- Development of web applications (C#, Dotnet, JS)
- Design of websites (Composite C1)
- Company Website
## Projects
### DevOps Engineer, Amamed
**until 2023**
Just right for your pharmacy! amamed is the only digital solution on the market that puts your pharmacy at the center and makes you fully equipped, secure, and flexible online.
- Provision of various services (including reverse proxies, databases, load balancers)
- Operation of Docker Swarm clusters
- Product Website
- Provision of various services (including reverse proxies, databases, load balancers)
- Operation of Docker Swarm clusters
- Product Website
### DevOps Engineer, deineApotheke
**until 2021**
"deine Apotheke" supports the pharmacies in your neighborhood and paves the way for you to access pharmacy services: through our app, you can select your pharmacy and pre-order medications, even with a prescription.
- Provision of various services (including backend APIs, MariaDB clusters, NATS, Redis)
- Design and operation of Kubernetes clusters (3 locations)
- Management of automated pipelines via Bitbucket Pipelines (continuous integration)
- IT administration for 6 individuals (SysOps)
- Provision of various services (including backend APIs, MariaDB clusters, NATS, Redis)
- Design and operation of Kubernetes clusters (3 locations)
- Management of automated pipelines via Bitbucket Pipelines (continuous integration)
- IT administration for 6 individuals (SysOps)

View File

@ -41,7 +41,10 @@ const socialCount = SOCIALS.filter(social => social.active).length;
</a>
<p>
I'm Alex, a DevOps architect and software developer. I currently hold the role of DevOps Engineer at Materna, where I assist developers in accelerating web performance and provide guidance on various topics such as web development, Kubernetes, network security, and more.
I'm Alex, a DevOps architect and software developer. I currently hold
the role of DevOps Engineer at Materna, where I assist developers in
accelerating web performance and provide guidance on various topics such
as web development, Kubernetes, network security, and more.
</p>
<!-- <p>
Read the blog posts or check

View File

@ -1,50 +1,48 @@
/* ibm-plex-mono-regular - latin */
@font-face {
font-display: swap; /* Check https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face/font-display for other options. */
font-family: 'IBM Plex Mono';
font-family: "IBM Plex Mono";
font-style: normal;
font-weight: 400;
src: url('/fonts/ibm-plex-mono-v19-latin-regular.woff2') format('woff2'); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
src: url("/fonts/ibm-plex-mono-v19-latin-regular.woff2") format("woff2"); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
}
/* ibm-plex-mono-500 - latin */
@font-face {
font-display: swap; /* Check https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face/font-display for other options. */
font-family: 'IBM Plex Mono';
font-family: "IBM Plex Mono";
font-style: normal;
font-weight: 500;
src: url('/fonts/ibm-plex-mono-v19-latin-500.woff2') format('woff2'); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
src: url("/fonts/ibm-plex-mono-v19-latin-500.woff2") format("woff2"); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
}
/* ibm-plex-mono-600 - latin */
@font-face {
font-display: swap; /* Check https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face/font-display for other options. */
font-family: 'IBM Plex Mono';
font-family: "IBM Plex Mono";
font-style: normal;
font-weight: 600;
src: url('/fonts/ibm-plex-mono-v19-latin-600.woff2') format('woff2'); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
src: url("/fonts/ibm-plex-mono-v19-latin-600.woff2") format("woff2"); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
}
/* ibm-plex-mono-600italic - latin */
@font-face {
font-display: swap; /* Check https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face/font-display for other options. */
font-family: 'IBM Plex Mono';
font-family: "IBM Plex Mono";
font-style: italic;
font-weight: 600;
src: url('/fonts/ibm-plex-mono-v19-latin-600italic.woff2') format('woff2'); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
src: url("/fonts/ibm-plex-mono-v19-latin-600italic.woff2") format("woff2"); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
}
/* ibm-plex-mono-700 - latin */
@font-face {
font-display: swap; /* Check https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face/font-display for other options. */
font-family: 'IBM Plex Mono';
font-family: "IBM Plex Mono";
font-style: normal;
font-weight: 700;
src: url('/fonts/ibm-plex-mono-v19-latin-700.woff2') format('woff2'); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
src: url("/fonts/ibm-plex-mono-v19-latin-700.woff2") format("woff2"); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
}
@tailwind base;
@tailwind components;
@tailwind utilities;
@layer base {
:root,
html[data-theme="light"] {

View File

@ -4,30 +4,14 @@
"baseUrl": "src",
"jsx": "react-jsx",
"paths": {
"@assets/*": [
"assets/*"
],
"@config": [
"config.ts"
],
"@components/*": [
"components/*"
],
"@content/*": [
"content/*"
],
"@layouts/*": [
"layouts/*"
],
"@pages/*": [
"pages/*"
],
"@styles/*": [
"styles/*"
],
"@utils/*": [
"utils/*"
]
"@assets/*": ["assets/*"],
"@config": ["config.ts"],
"@components/*": ["components/*"],
"@content/*": ["content/*"],
"@layouts/*": ["layouts/*"],
"@pages/*": ["pages/*"],
"@styles/*": ["styles/*"],
"@utils/*": ["utils/*"]
}
}
}
}