Compare commits

No commits in common. "main" and "v2.3.6" have entirely different histories.
main ... v2.3.6

22 changed files with 3435 additions and 7943 deletions

View File

@@ -1,11 +0,0 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file

version: 2
updates:
  - package-ecosystem: "" # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "weekly"

View File

@@ -1,10 +1,10 @@
name: Docker Image CI

on:
  push:
    tags:
-      - "*"
+      - '*'
  workflow_dispatch:

env:
  CARGO_TERM_COLOR: always
@@ -13,26 +13,26 @@ jobs:
  build-container:
    runs-on: ubuntu-latest
    steps:
      - name: Check out the repo
        uses: actions/checkout@v3
      - name: Log in to Docker Hub
        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: alexohneander/alexohneander-astro
      - name: Build and push Docker image
        uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
        with:
          context: .
          file: ./Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
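The workflow above builds and pushes the image whenever a tag is pushed. As a quick reference, pushing a release tag from a local checkout looks roughly like this (the tag name is only an example):

```bash
# Create and push a release tag; the tag push triggers the Docker Image CI workflow.
# The tag name below is illustrative.
git tag v2.4.0
git push origin v2.4.0
```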

View File

@@ -2,7 +2,7 @@ image:
  repository: alexohneander/alexohneander-astro
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
-  tag: "v2.4.0"
+  tag: "v2.3.5"

ingress:
  enabled: true
@@ -11,24 +11,12 @@ ingress:
    kubernetes.io/ingress.class: nginx
    kubernetes.io/tls-acme: "true"
    cert-manager.io/cluster-issuer: letsencrypt-prod
-    #nginx.ingress.kubernetes.io/configuration-snippet : |
-    #  if ($request_uri ~* \.(gif|jpe?g|png|woff2)) {
-    #    expires 1M;
-    #    add_header Cache-Control "public";
-    #  }
+    nginx.ingress.kubernetes.io/configuration-snippet : |
+      if ($request_uri ~* \.(gif|jpe?g|png|woff2)) {
+        expires 1M;
+        add_header Cache-Control "public";
+      }
  hosts:
-    - host: www.wellnitz-alex.de
-      paths:
-        - path: /
-          pathType: Prefix
-    - host: www.alexohneander.de
-      paths:
-        - path: /
-          pathType: Prefix
-    - host: wellnitz-alex.de
-      paths:
-        - path: /
-          pathType: Prefix
    - host: alexohneander.de
      paths:
        - path: /
@@ -37,6 +25,3 @@ ingress:
    - secretName: alexohneander-tls
      hosts:
        - alexohneander.de
-        - wellnitz-alex.de
-        - www.alexohneander.de
-        - www.wellnitz-alex.de
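For reference, a values change like the one above would typically be rolled out with `helm upgrade`. A minimal sketch, assuming a chart directory and release name (both are placeholders, not taken from this repository):

```bash
# Roll out the chart with an explicit image tag override.
# Release name, chart path and namespace are assumptions for illustration.
helm upgrade --install alexohneander-astro ./chart \
  --namespace alexohneander --create-namespace \
  --set image.tag=v2.4.0
```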

package-lock.json (generated): 10846 changed lines

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
  "name": "alexohneander-astro",
-  "version": "2.3.10",
+  "version": "2.3.6",
  "scripts": {
    "dev": "astro check --watch & astro dev",
    "start": "astro dev",
@@ -17,7 +17,7 @@
  "dependencies": {
    "@astrojs/rss": "^2.4.1",
    "@resvg/resvg-js": "^2.4.1",
-    "astro": "^4.16.4",
+    "astro": "^2.4.5",
    "fuse.js": "^6.6.2",
    "github-slugger": "^2.0.0",
    "remark-collapse": "^0.1.2",
@@ -27,8 +27,8 @@
  },
  "devDependencies": {
    "@astrojs/react": "^2.1.3",
-    "@astrojs/sitemap": "^1.4.0",
-    "@astrojs/tailwind": "^5.1.2",
+    "@astrojs/sitemap": "^1.3.1",
+    "@astrojs/tailwind": "^3.1.2",
    "@divriots/jampack": "^0.11.2",
    "@tailwindcss/typography": "^0.5.9",
    "@types/github-slugger": "^1.3.0",

View File

@@ -69,10 +69,7 @@ const { activeNav } = Astro.props;
            </a>
          </li>
          <li>
-           <a
-             href="/experience"
-             class={activeNav === "experience" ? "active" : ""}
-           >
+           <a href="/experience" class={activeNav === "experience" ? "active" : ""}>
              Experience
            </a>
          </li>

View File

@@ -8,14 +8,7 @@ export interface Props {
  newTarget?: boolean;
}

-const {
-  href,
-  className,
-  ariaLabel,
-  title,
-  disabled = false,
-  newTarget = true,
-} = Astro.props;
+const { href, className, ariaLabel, title, disabled = false, newTarget = true } = Astro.props;
---

<a
@@ -25,7 +18,7 @@ const {
  aria-label={ariaLabel}
  title={title}
  aria-disabled={disabled}
-  target={newTarget ? "_blank" : "_self"}
+  target={ newTarget ? "_blank" : "_self"}
>
  <slot />
</a>

View File

@@ -17,7 +17,6 @@ const { centered = false } = Astro.props;
        href={social.href}
        className="link-button"
        title={social.linkTitle}
      >
        <Fragment set:html={socialIcons[social.name]} />
      </LinkButton>

View File

@ -3,7 +3,7 @@ import type { Site, SocialObjects } from "./types";
export const SITE: Site = { export const SITE: Site = {
website: "https://astro-paper.pages.dev/", website: "https://astro-paper.pages.dev/",
author: "Alex Wellnitz", author: "Alex Wellnitz",
desc: "Alex Wellnitz DevOps Architect | Software Developer | Kubernetes Expert | Network Security | Web Performance Optimization. Accelerating web performance and modernizing application delivery.", desc: "A minimal, responsive and SEO-friendly Astro blog theme.",
title: "Alexohneander", title: "Alexohneander",
subtitle: "Engineering Chaos", subtitle: "Engineering Chaos",
ogImage: "astropaper-og.jpg", ogImage: "astropaper-og.jpg",
@ -137,8 +137,8 @@ export const SOCIALS: SocialObjects = [
}, },
{ {
name: "Mastodon", name: "Mastodon",
href: "https://mastodon.social/@alexohneander", href: "https://github.com/satnaing/astro-paper",
linkTitle: `${SITE.title} on Mastodon`, linkTitle: `${SITE.title} on Mastodon`,
active: true, active: false,
}, },
]; ];

View File

@@ -12,7 +12,8 @@ tags:
  - bash
  - backup
ogImage: ""
-description: In this post, we will show you how to create a MySQL server backup using Kubernetes CronJobs.
+description:
+  In this post, we will show you how to create a MySQL server backup using Kubernetes CronJobs.
---
In this post, we will show you how to create a MySQL server backup using Kubernetes CronJobs.
@@ -21,14 +22,13 @@ In our case, we do not have a managed MySQL server. But we want to backup it to
For this we first build a container that can execute our tasks, because we will certainly need several tasks to backup our cluster.
## CronJob Agent Container
First, we'll show you our Dockerfile so you know what we need.
```Dockerfile
FROM alpine:3.10
# Update
RUN apk --update add --no-cache bash nodejs-current yarn curl busybox-extras vim rsync git mysql-client openssh-client
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl && chmod +x ./kubectl && mv ./kubectl /usr/local/bin/kubectl
# Scripts
@@ -41,7 +41,6 @@ RUN mkdir /var/backup/mysql
```
## Backup Script
And now our backup script which the container executes.
Our script is quite simple, we get all tables with the mysql client, export them as sql file, pack them in a zip file and send them in a 8 hours interval to our NAS.
@@ -90,7 +89,6 @@ rsync -avz $BACKUPDIR/backup-${NOW}.tar.gz root@$BACKUPSERVER:$BACKUPREMOTEDIR
```
## Kubernetes CronJob Deployment
Finally we show you the kubernetes deployment for our agent.
In the deployment, our agent is defined as a CronJob that runs every 8 hours.
@@ -113,7 +111,7 @@ spec:
      containers:
        - name: cronjob-agent
          image: xxx/cronjob-agent
          command: ["bash", "/srv/jobs/backup-mariadb.sh"]
          volumeMounts:
            - mountPath: /root/.ssh/id_rsa.pub
              name: cronjob-default-config
@@ -131,4 +129,4 @@ spec:
            name: cronjob-default-config
            defaultMode: 256
      restartPolicy: Never
```
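The body of the backup script is elided in this diff. A minimal sketch of the flow it describes (dump, pack, ship via rsync); host names, credentials and paths below are placeholders:

```bash
#!/bin/sh
# Sketch of the backup flow described above; all values are placeholders.
NOW=$(date +%Y%m%d-%H%M)
BACKUPDIR=/var/backup/mysql
BACKUPSERVER=nas.example.local
BACKUPREMOTEDIR=/volume1/backups/mysql

# Dump all databases with the mysql client installed in the agent image.
mysqldump --all-databases --single-transaction \
  -h mysql.default.svc -u backup -p"$MYSQL_PASSWORD" \
  > "$BACKUPDIR/all-databases-$NOW.sql"

# Pack the dump and ship it to the NAS.
tar -czf "$BACKUPDIR/backup-$NOW.tar.gz" -C "$BACKUPDIR" "all-databases-$NOW.sql"
rsync -avz "$BACKUPDIR/backup-$NOW.tar.gz" "root@$BACKUPSERVER:$BACKUPREMOTEDIR"
```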

View File

@@ -10,13 +10,13 @@ tags:
  - cni
  - baremetal
ogImage: ""
-description: In a freshly set up Kubernetes cluster, we need a so-called CNI. This CNI is not always present after installation.
+description:
+  In a freshly set up Kubernetes cluster, we need a so-called CNI. This CNI is not always present after installation.
---
In a freshly set up Kubernetes cluster, we need a so-called CNI. This CNI is not always present after installation.
## What is a Container Network Interface (CNI)?
CNI is a network framework that allows the dynamic configuration of networking resources through a group of Go-written specifications and libraries. The specification mentioned for the plugin outlines an interface that would configure the network, provisioning the IP addresses, and maintain multi-host connectivity.
In the Kubernetes context, the CNI seamlessly integrates with the kubelet to allow automatic network configuration between pods using an underlay or overlay network. An underlay network is defined at the physical level of the networking layer composed of routers and switches. In contrast, the overlay network uses a virtual interface like VxLAN to encapsulate the network traffic.
@@ -26,7 +26,6 @@ Once the network configuration type is specified, the runtime defines a network
In addition to Kubernetes networking, CNI also supports Kubernetes-based platforms like OpenShift to provide a unified container communication across the cluster through software-defined networking (SDN) approach.
### What is Cilium?
Cilium is an open-source, highly scalable Kubernetes CNI solution developed by Linux kernel developers. Cilium secures network connectivity between Kubernetes services by adding high-level application rules utilizing eBPF filtering technology. Cilium is deployed as a daemon `cilium-agent` on each node of the Kubernetes cluster to manage operations and translates the network definitions to eBPF programs.
The communication between pods happens over an overlay network or utilizing a routing protocol. Both IPv4 and IPv6 addresses are supported for cases. Overlay network implementation utilizes VXLAN tunneling for packet encapsulation while native routing happens through unencapsulated BGP protocol.
@@ -38,12 +37,10 @@ Its network and application layer awareness manages packet inspection, and the a
Cilium also has support for Kubernetes Network Policies through HTTP request filters. The policy configuration can be written into a YAML or JSON file and offers both ingress and egress enforcements. Admins can accept or reject requests based on the request method or path header while integrating policies with service mesh like Istio.
### Preparation
For the installation we need the CLI from Cilium.
We can install this with the following commands:
**Mac OSx**
```bash
curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/latest/download/cilium-darwin-amd64.tar.gz{,.sha256sum}
shasum -a 256 -c cilium-darwin-amd64.tar.gz.sha256sum
@@ -52,7 +49,6 @@ rm cilium-darwin-amd64.tar.gz{,.sha256sum}
```
**Linux**
```bash
curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/latest/download/cilium-linux-amd64.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-amd64.tar.gz.sha256sum
@@ -61,18 +57,14 @@ rm cilium-linux-amd64.tar.gz{,.sha256sum}
```
### Install Cilium
You can install Cilium on any Kubernetes cluster. These are the generic instructions on how to install Cilium into any Kubernetes cluster. The installer will attempt to automatically pick the best configuration options for you.
#### Requirements
- Kubernetes must be configured to use CNI
- Linux kernel >= 4.9.17
#### Install
Install Cilium into the Kubernetes cluster pointed to by your current kubectl context:
```bash
cilium install
```
@@ -80,9 +72,7 @@ cilium install
If the installation fails for some reason, run `cilium status` to retrieve the overall status of the Cilium deployment and inspect the logs of whatever pods are failing to be deployed.
### Validate the Installation
To validate that Cilium has been properly installed, you can run
```bash
$ cilium status --wait
    /¯¯\
@@ -101,7 +91,6 @@ Image versions cilium quay.io/cilium/cilium:v1.9.5: 2
```
Run the following command to validate that your cluster has proper network connectivity:
```bash
$ cilium connectivity test
Monitor aggregation detected, will skip some flow validation steps
@@ -112,5 +101,4 @@ $ cilium connectivity test
---------------------------------------------------------------------------------------------------------------------
✅ 69/69 tests successful (0 warnings)
```
Congratulations! You have a fully functional Kubernetes cluster with Cilium. 🎉
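Beyond `cilium status`, the agent pods themselves can be checked directly. A short sketch, assuming the default install (DaemonSet `cilium` in `kube-system` with the `k8s-app=cilium` label):

```bash
# Verify that the cilium-agent DaemonSet has a pod running on every node.
kubectl -n kube-system get daemonset cilium
kubectl -n kube-system get pods -l k8s-app=cilium -o wide
```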

View File

@@ -1,90 +0,0 @@
---
author: Alex Wellnitz
pubDatetime: 2023-11-03T19:20:50+02:00
title: Highly scalable Minecraft cluster
postSlug: highly-scalable-minecraft-cluster
featured: true
draft: false
tags:
- kubernetes
- minecraft
- cluster
ogImage: ""
description: How to build and configure a highly scalable Minecraft server
---
Are you planning a very large Minecraft LAN party? Then this article is for you. Here I show you how to set up a highly scalable Minecraft cluster.
### What is a Minecraft cluster?
A Minecraft cluster is a Minecraft server network that consists of multiple Minecraft servers. These servers are connected to each other via a network and can therefore be shared. This means that you can play with your friends on a server that consists of multiple servers.
### How does a Minecraft cluster work?
A Minecraft cluster consists of several components.
<!-- Image -->
![Minecraft cluster](https://github.com/MultiPaper/MultiPaper/raw/main/assets/multipaper-diagram.jpg)
#### Master database
First, there is the master database. This database allows servers to store data in a central location that all servers can access. Servers store chunks, maps, level.dat, player data, banned players, and more in this database. This database also records which chunk belongs to which server and coordinates communication between servers.
#### Server
The master database is great for storing data, but not so good at synchronizing data in real time between servers. This is where peer-to-peer communication comes in. Each server establishes a connection to another server so that data between them can be updated in real time. When a player on server A attacks another player on server B, server A sends this data directly to server B so that server B can damage the player and apply any knockback.
#### Load Balancer
The load balancer is the last component of the cluster. A load balancer is required to distribute players evenly across your servers. A load balancer automatically distributes players between servers to distribute the load evenly across the individual servers.
### Why do I need multiple servers?
By having multiple servers, we can distribute the load across multiple servers. This means that we can have more players on our servers without the servers becoming overloaded. With this setup, we can also easily add new servers if we get more players. If the number of players decreases again, the server can be removed again.
## Preparation
You should be familiar with Kubernetes and have set up a Kubernetes cluster. I recommend [k3s](https://k3s.io/).
You should also be familiar with Helm. I recommend [Helm 3](https://helm.sh/docs/intro/install/).
## Installation
First, you should clone the repository.
```bash
git clone git@github.com:alexohneander/MultiPaperHelm.git
cd MultiPaperHelm/
```
I installed the entire setup in a separate namespace. You can create this namespace with the following command.
```bash
kubectl create namespace minecraft
```
Next, we install the Minecraft cluster with Helm.
```bash
helm install multipaper . --namespace minecraft
```
Once the Helm chart is installed, you can view the port of the proxy service.
```bash
kubectl describe service multipaper-master-proxy -n minecraft
```
This port is the port that you need to enter in your Minecraft client.
## Configuration
The Helm chart creates several ConfigMaps. In these ConfigMaps, you can customize the configuration of your cluster.
For example, you can set the number of maximum players or change the description of the server.
For more information on the individual config files, see [MultiPaper](https://github.com/MultiPaper/MultiPaper).
## Conclusion
With this setup, you can easily set up a highly scalable Minecraft cluster. You can easily add new servers if you get more players and remove them again if the number of players decreases again.
You can test this setup under the following Server Address: `minecraft.alexohneander.de:31732`
If you have any questions, feel free to contact me on [Email](mailto:moin@wellnitz-alex.de) or on [Matrix](https://matrix.to/#/@alexohneander:dev-null.rocks).
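Scaling the cluster up or down, as described above, comes down to changing the server replica count. A rough sketch; the resource name `multipaper-server` is an assumption, so check `kubectl get all -n minecraft` for the names the chart actually creates:

```bash
# Add capacity for more players by scaling the MultiPaper server pods.
kubectl -n minecraft scale statefulset multipaper-server --replicas=5
```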

View File

@@ -10,7 +10,8 @@ tags:
  - openvpn
  - google
ogImage: ""
-description: In this tutorial I will try to explain you briefly and concisely how you can set up a site-to-site VPN for the Google Cloud Network.
+description:
+  In this tutorial I will try to explain you briefly and concisely how you can set up a site-to-site VPN for the Google Cloud Network.
---
In this tutorial I will try to explain you briefly and concisely how you can set up a site-to-site VPN for the Google Cloud Network.
@@ -22,17 +23,13 @@ We need 2 virtual machines. The first one on the side of our office and the other
#### Setup OpenVPN Clients
##### Site-to-Site Client Office Side
We need to install OpenVPN, we do it as follows:
```bash
apt install openvpn -y
```
After that we add our OpenVPN configuration under this path `/etc/openvpn/s2s.conf`.
-_s2s.conf_
+*s2s.conf*
```
# Use a dynamic tun device.
# For Linux 2.2 or non-Linux OSes,
@@ -43,14 +40,14 @@ _s2s.conf_
dev tun
# Our OpenVPN peer is the Google gateway.
remote IP_GOOGLE_VPN_CLIENT
ifconfig 4.1.0.2 4.1.0.1
route 10.156.0.0 255.255.240.0 # Google Cloud VM Network
route 10.24.0.0 255.252.0.0 # Google Kubernetes Pod Network
push "route 192.168.10.0 255.255.255.0" # Office Network
# Our pre-shared static key
#secret static.key
@@ -81,15 +78,11 @@ verb 3
log /etc/openvpn/s2s.log
```
We also have to enable the IPv4 forward function in the kernel, so we go to `/etc/sysctl.conf` and uncomment the following line:
```
net.ipv4.ip_forward=1
```
We can then start our OpenVPN client with this command:
```bash
systemctl start openvpn@s2s
```
@@ -97,13 +90,11 @@ systemctl start openvpn@s2s
On the Office side we have to open the port for the OpenVPN client so that the other side can connect.
##### Site-to-Site Client Google Side
When setting up the OpenVPN client on Google's side, we need to consider the following settings when creating it. When we create the machine, we need to enable this option in the network settings:
![Google Cloud Network Settings](https://i.imgur.com/OXEkhxo.png)
Also on this side we have to install the OpenVPN client again and then add this config under the path `/etc/openvpn/s2s.conf`:
```
# Use a dynamic tun device.
# For Linux 2.2 or non-Linux OSes,
@@ -114,7 +105,7 @@ Also on this side we have to install the OpenVPN client again and then add this
dev tun
# Our OpenVPN peer is the Office gateway.
remote IP_OFFICE_VPN_CLIENT
ifconfig 4.1.0.2 4.1.0.1
@@ -152,15 +143,12 @@ verb 3
log /etc/openvpn/s2s.log
```
We also have to enable the IPv4 forward function in the kernel, so we go to `/etc/sysctl.conf` and uncomment the following line:
```
net.ipv4.ip_forward=1
```
##### Connection test
Now that both clients are basically configured we can test the connection. Both clients have to be started with systemctl. After that we look at the logs with `tail -f /etc/openvpn/s2s.log` and wait for this message:
```
@@ -179,7 +167,6 @@ Wed May 5 08:28:12 2021 Initialization Sequence Completed
If we can't establish a connection, we need to check if the ports are opened on both sides.
#### Routing Google Cloud Network
After our clients have finished installing and configuring, we need to set the routes on Google. I will not map the Office side, as this is always different. But you have to route the networks for the Google network there as well.
To set the route on Google we go to the network settings and then to Routes. Here you have to specify your office network so that the clients in the Google network know what to do.
@@ -187,7 +174,6 @@ To set the route on Google we go to the network settings and then to Routes. Her
![Google Cloud Network Route](https://i.imgur.com/6Q2Drf4.png)
#### IP-Masquerade-Agent
IP masquerading is a form of network address translation (NAT) used to perform many-to-one IP address translations, which allows multiple clients to access a destination using a single IP address. A GKE cluster uses IP masquerading so that destinations outside of the cluster only receive packets from node IP addresses instead of Pod IP addresses. This is useful in environments that expect to only receive packets from node IP addresses.
You have to edit the ip-masq-agent and this configuration is responsible for letting the pods inside the nodes, reach other parts of the GCP VPC Network, more specifically the VPN. So, it allows pods to communicate with the devices that are accessible through the VPN.
@@ -196,12 +182,11 @@ First of all we're gonna be working inside the kube-system namespace, and we're
```yaml
nonMasqueradeCIDRs:
  - 10.24.0.0/14 # The IPv4 CIDR the cluster is using for Pods (required)
  - 10.156.0.0/20 # The IPv4 CIDR of the subnetwork the cluster is using for Nodes (optional, works without but I guess its better with it)
masqLinkLocal: false
resyncInterval: 60s
```
and run `kubectl create configmap ip-masq-agent --from-file config --namespace kube-system`
afterwards, configure the ip-masq-agent, put this in a `ip-masq-agent.yml` file:
@@ -220,17 +205,17 @@ spec:
    spec:
      hostNetwork: true
      containers:
        - name: ip-masq-agent
          image: gcr.io/google-containers/ip-masq-agent-amd64:v2.4.1
          args:
            - --masq-chain=IP-MASQ
            # To non-masquerade reserved IP ranges by default, uncomment the line below.
            # - --nomasq-all-reserved-ranges
          securityContext:
            privileged: true
          volumeMounts:
            - name: config
              mountPath: /etc/config
      volumes:
        - name: config
          configMap:
@@ -242,14 +227,14 @@ spec:
            - key: config
              path: ip-masq-agent
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
        - key: "CriticalAddonsOnly"
          operator: "Exists"
```
and run `kubectl -n kube-system apply -f ip-masq-agent.yml`.
Now our site-to-site VPN should be set up. You should now test if you can ping the pods and if all other services work as you expect them to.
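Putting the last steps together on either VPN client, a minimal sketch using only commands already referenced in the post:

```bash
# Apply the forwarding setting without a reboot and bring the tunnel up on boot.
sudo sysctl -w net.ipv4.ip_forward=1
sudo systemctl enable --now openvpn@s2s

# Watch the log until "Initialization Sequence Completed" appears.
sudo tail -f /etc/openvpn/s2s.log
```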

View File

@@ -1,47 +0,0 @@
---
author: Alex Wellnitz
pubDatetime: 2023-09-17T12:20:27+02:00
title: VSCode debug settings Collection
postSlug: vscode-debug-settings-collection.md
featured: true
draft: true
tags:
- vscode
- debug
- development
ogImage: ""
description: A collection of VSCode debug configurations (launch.json) for various languages.
---
### Go VSCode debugging
**launch.json:**
```json
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Debug Package",
      "type": "go",
      "request": "launch",
      "mode": "debug",
      "program": "${workspaceRoot}"
    }
  ]
}
```
### Java Remote VSCode debugging
**launch.json:**
```json
{
  "version": "0.2.0",
  "configurations": [
    {
      "type": "java",
      "name": "Debug (Attach)",
      "projectName": "MyApplication",
      "request": "attach",
      "hostName": "localhost",
      "port": 8787
    }
  ]
}
```

View File

@@ -10,8 +10,9 @@ tags:
  - network
  - haproxy
ogImage: ""
-description: To briefly explain the situation.
-  We have a **HAProxy** running on a Debian server as a Docker container. This is the entrance node to a **Docker Swarm** cluster.
+description:
+  To briefly explain the situation.
+  We have a **HAProxy** running on a Debian server as a Docker container. This is the entrance node to a **Docker Swarm** cluster.
---
To briefly explain the situation:
@@ -39,4 +40,4 @@ With this setup, we get overhead into the system that we don't need. We have an
If we use a lot of micro services it is important that we use something like Docker, because then we can share the kernel and it makes the deployment much easier.
But if we have only one application that is very important, it is better to keep it simple.

View File

@@ -10,7 +10,8 @@ tags:
  - linux
  - backup
ogImage: ""
-description: Since we all know that the first rule is "no backup, no pity", I'll show you how you can use Borg to back up your important data in an encrypted way with relative ease.
+description:
+  Since we all know that the first rule is "no backup, no pity", I'll show you how you can use Borg to back up your important data in an encrypted way with relative ease.
---
Since we all know that the first rule is "no backup, no pity", I'll show you how you can use Borg to back up your important data in an encrypted way with relative ease.
@@ -18,17 +19,14 @@ Since we all know that the first rule is "no backup, no pity", I'll show you how
If you do not want to use a second computer, but an external hard drive, you can adjust this later in the script and ignore the points in the instructions for the second computer.
### Requirements
- 2 Linux Computers
- Borg
- SSH
- Storage
- More than 5 brain cells
### Installation
First we need to install borg on both computers so that we can back up on one and save on the other.
```bash
sudo apt install borgbackup
```
@@ -36,13 +34,11 @@ sudo apt install borgbackup
Then we create a Borg repository. We can either use an external target or a local path.
**External Target:**
```bash
borg init --encryption=repokey ssh://user@192.168.2.42:22/mnt/backup/borg
```
**Local Path:**
```bash
borg init --encryption=repokey /path/to/backup_folder
```
@@ -53,7 +49,6 @@ This way you don't have to enter a password and is simply nicer from my point of
Once you have created everything and prepared the script with your parameters, I recommend that you run the script as a CronJob so that you no longer have to remember to back up your things yourself.
**crontab example:**
```bash
#Minute Hour Day Month Day(Week) command
#(0-59) (0-23) (1-31) (1-12) (1-7;1=Mo)
@@ -61,7 +56,6 @@ Once you have created everything and prepared the script with your parameters, I
```
### Automated script
```bash
#!/bin/sh
@@ -119,7 +113,6 @@ exit ${global_exit}
```
### Get your data from the backup
First, we create a temporary directory in which we can mount the backup.
```bash
@@ -132,9 +125,7 @@ At this point you must remember that you can use an external destination or a lo
```bash
borg mount ssh://user@192.168.2.42/mnt/backup/borg /tmp/borg-backup
```
Once our repo is mounted, we can change into the directory and restore files via **rsync** or **cp**.
### Conclusion
I hope you could understand everything and now secure your shit sensibly. Because without a backup we are all lost!
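The automated script itself is elided in this diff. A minimal sketch of the borg calls such a script typically wraps; repository URL, passphrase, paths and retention values are placeholders:

```bash
#!/bin/sh
# Sketch of a borg backup run; every value below is a placeholder.
export BORG_REPO='ssh://user@192.168.2.42:22/mnt/backup/borg'
export BORG_PASSPHRASE='change-me'

# Create a dated, compressed archive of the directories worth keeping.
borg create --stats --compression lz4 \
  "::backup-$(date +%Y-%m-%d)" /etc /home /root

# Thin out old archives so the repository does not grow without bound.
borg prune --keep-daily 7 --keep-weekly 4 --keep-monthly 6
```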

View File

@@ -35,7 +35,6 @@ const socialImageURL = new URL(
    <meta name="viewport" content="width=device-width" />
    <link rel="icon" type="image/svg+xml" href="/favicon.svg" />
    <link rel="canonical" href={canonicalURL} />
-   <link rel="me" href="https://mastodon.social/@alexohneander" />
    <meta name="generator" content={Astro.generator} />

    <!-- General Meta Tags -->

View File

@@ -13,8 +13,7 @@ export interface Props {
const { post } = Astro.props;

-const { title, author, description, ogImage, canonicalURL, pubDatetime, tags } =
-  post.data;
+const { title, author, description, ogImage, canonicalURL, pubDatetime, tags } = post.data;

const { Content } = await post.render();
@@ -22,13 +21,7 @@ const ogUrl = new URL(ogImage ? ogImage : `${title}.png`, Astro.url.origin)
  .href;
---

-<Layout
-  title={title}
-  author={author}
-  description={description}
-  ogImage={ogUrl}
-  canonicalURL={canonicalURL}
->
+<Layout title={title} author={author} description={description} ogImage={ogUrl} canonicalURL={canonicalURL}>
  <Header />
  <div class="mx-auto flex w-full max-w-3xl justify-start px-2">
    <button

View File

@ -4,80 +4,62 @@ title: "Experience"
--- ---
### DevOps Engineer, Materna SE ### DevOps Engineer, Materna SE
**since 2023** **since 2023**
As a key globally active IT service provider, Materna advise and assist you in all aspects of digitization and provide tailor-made technologies for agile, flexible and secure IT. As a key globally active IT service provider, Materna advise and assist you in all aspects of digitization and provide tailor-made technologies for agile, flexible and secure IT.
- **Infrastructure as Code (IaC)**: - **Infrastructure as Code (IaC)**:
- Develop and maintain infrastructure as code scripts using tools like Terraform, Ansible, or CloudFormation to automate the provisioning of infrastructure resources. - Develop and maintain infrastructure as code scripts using tools like Terraform, Ansible, or CloudFormation to automate the provisioning of infrastructure resources.
- **Continuous Integration (CI) and Continuous Deployment (CD)**: - **Continuous Integration (CI) and Continuous Deployment (CD)**:
- Implement and manage CI/CD pipelines using tools like Jenkins, Travis CI, or GitLab CI to automate the software delivery process. - Implement and manage CI/CD pipelines using tools like Jenkins, Travis CI, or GitLab CI to automate the software delivery process.
- **Containerization and Orchestration**: - **Containerization and Orchestration**:
- Work with Docker containers and container orchestration platforms like Kubernetes to improve scalability and resource utilization. - Work with Docker containers and container orchestration platforms like Kubernetes to improve scalability and resource utilization.
- **Monitoring and Logging**: - **Monitoring and Logging**:
- Set up monitoring and logging solutions (e.g., Prometheus, ELK Stack) to track application performance, identify issues, and troubleshoot problems proactively. - Set up monitoring and logging solutions (e.g., Prometheus, ELK Stack) to track application performance, identify issues, and troubleshoot problems proactively.
- **Collaboration and Communication**: - **Collaboration and Communication**:
- Foster collaboration between development and operations teams, ensuring effective communication and knowledge sharing. - Foster collaboration between development and operations teams, ensuring effective communication and knowledge sharing.
- **Infrastructure Optimization**: - **Infrastructure Optimization**:
- Analyze and optimize infrastructure costs, resource utilization, and performance to achieve cost-efficiency and scalability. - Analyze and optimize infrastructure costs, resource utilization, and performance to achieve cost-efficiency and scalability.
- **Troubleshooting and Support**: - **Troubleshooting and Support**:
- Respond to incidents, diagnose problems, and provide support to ensure system reliability and availability. - Respond to incidents, diagnose problems, and provide support to ensure system reliability and availability.
### DevOps Engineer, Apozin GmbH ### DevOps Engineer, Apozin GmbH
**until 2023** **until 2023**
Apozin turns visions into a competitive advantage. Our team of pharmacists, PTA's, graphic designers, web designers, sales professionals, marketing specialists, and programmers realize holistic concepts that we constantly evolve and improve for our clients. Apozin turns visions into a competitive advantage. Our team of pharmacists, PTA's, graphic designers, web designers, sales professionals, marketing specialists, and programmers realize holistic concepts that we constantly evolve and improve for our clients.
- Operation and design of Kubernetes clusters at multiple locations - Operation and design of Kubernetes clusters at multiple locations
- Design and implementation of backup strategies - Design and implementation of backup strategies
- Deployment of various services (including HAProxy, MariaDB, MongoDB, Elasticsearch, NGINX) - Deployment of various services (including HAProxy, MariaDB, MongoDB, Elasticsearch, NGINX)
- Design and operation of comprehensive monitoring solutions (Zabbix, Grafana, Prometheus, Graylog) - Design and operation of comprehensive monitoring solutions (Zabbix, Grafana, Prometheus, Graylog)
- Design and setup of build pipelines with Jenkins, Docker, and FluxCD - Design and setup of build pipelines with Jenkins, Docker, and FluxCD
- Administration of various servers in different environments (Google Cloud, Hetzner, AWS, Digital Ocean, Hosting.de) - Administration of various servers in different environments (Google Cloud, Hetzner, AWS, Digital Ocean, Hosting.de)
### Fullstack .Net Developer, prointernet ### Fullstack .Net Developer, prointernet
**until 2019** **until 2019**
Agency for internet and design founded in 1998, established in Kastellaun in the Hunsrück region, operating worldwide, and at home on the internet. A team of designers, developers, and consultants who love what they do. Agency for internet and design founded in 1998, established in Kastellaun in the Hunsrück region, operating worldwide, and at home on the internet. A team of designers, developers, and consultants who love what they do.
- Development of web applications (C#, Dotnet, JS) - Development of web applications (C#, Dotnet, JS)
- Design of websites (Composite C1) - Design of websites (Composite C1)
- Company Website - Company Website
## Projects ## Projects
### DevOps Engineer, Cofinity-X
**since 2023**
Cofinity-X is the first operator of the Catena-X network, connecting automotive partners at every level of the value chain. As a DevOps engineer, I was responsible for the enablement services.
- Deployment of various open source Projects with GitOps and ArgoCD
- Managing projects on a Kubernetes clusters
- Communication with end customers (support, troubleshooting)
- Analysis of problems and spikes in load
- Planning new projects and deploying to the Kubernetes clusters
### DevOps Engineer, Amamed ### DevOps Engineer, Amamed
**until 2023** **until 2023**
Just right for your pharmacy! amamed is the only digital solution on the market that puts your pharmacy at the center and makes you fully equipped, secure, and flexible online. Just right for your pharmacy! amamed is the only digital solution on the market that puts your pharmacy at the center and makes you fully equipped, secure, and flexible online.
- Provision of various services (including reverse proxies, databases, load balancers) - Provision of various services (including reverse proxies, databases, load balancers)
- Operation of Docker Swarm clusters - Operation of Docker Swarm clusters
- Product Website - Product Website
### DevOps Engineer, deineApotheke ### DevOps Engineer, deineApotheke
**until 2021** **until 2021**
"deine Apotheke" supports the pharmacies in your neighborhood and paves the way for you to access pharmacy services: through our app, you can select your pharmacy and pre-order medications, even with a prescription. "deine Apotheke" supports the pharmacies in your neighborhood and paves the way for you to access pharmacy services: through our app, you can select your pharmacy and pre-order medications, even with a prescription.
- Provision of various services (including backend APIs, MariaDB clusters, NATs, Redis) - Provision of various services (including backend APIs, MariaDB clusters, NATs, Redis)
- Design and operation of Kubernetes clusters (3 locations) - Design and operation of Kubernetes clusters (3 locations)
- Management of automated pipelines via Bitbucket Pipelines (continuous integration) - Management of automated pipelines via Bitbucket Pipelines (continuous integration)
- IT administration for 6 individuals (SysOps) - IT administration for 6 individuals (SysOps)

View File

@@ -41,10 +41,7 @@ const socialCount = SOCIALS.filter(social => social.active).length;
      </a>
      <p>
-       I'm Alex Wellnitz, a DevOps architect and software developer. I currently hold
-       the role of DevOps Engineer at Materna, where I assist developers in
-       accelerating web performance and provide guidance on various topics such
-       as web development, Kubernetes, network security, and more.
+       I'm Alex, a DevOps architect and software developer. I currently hold the role of DevOps Engineer at Materna, where I assist developers in accelerating web performance and provide guidance on various topics such as web development, Kubernetes, network security, and more.
      </p>

      <!-- <p>
        Read the blog posts or check

View File

@@ -1,48 +1,50 @@
/* ibm-plex-mono-regular - latin */
@font-face {
  font-display: swap; /* Check https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face/font-display for other options. */
-  font-family: "IBM Plex Mono";
+  font-family: 'IBM Plex Mono';
  font-style: normal;
  font-weight: 400;
-  src: url("/fonts/ibm-plex-mono-v19-latin-regular.woff2") format("woff2"); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
+  src: url('/fonts/ibm-plex-mono-v19-latin-regular.woff2') format('woff2'); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
}

/* ibm-plex-mono-500 - latin */
@font-face {
  font-display: swap; /* Check https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face/font-display for other options. */
-  font-family: "IBM Plex Mono";
+  font-family: 'IBM Plex Mono';
  font-style: normal;
  font-weight: 500;
-  src: url("/fonts/ibm-plex-mono-v19-latin-500.woff2") format("woff2"); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
+  src: url('/fonts/ibm-plex-mono-v19-latin-500.woff2') format('woff2'); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
}

/* ibm-plex-mono-600 - latin */
@font-face {
  font-display: swap; /* Check https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face/font-display for other options. */
-  font-family: "IBM Plex Mono";
+  font-family: 'IBM Plex Mono';
  font-style: normal;
  font-weight: 600;
-  src: url("/fonts/ibm-plex-mono-v19-latin-600.woff2") format("woff2"); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
+  src: url('/fonts/ibm-plex-mono-v19-latin-600.woff2') format('woff2'); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
}

/* ibm-plex-mono-600italic - latin */
@font-face {
  font-display: swap; /* Check https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face/font-display for other options. */
-  font-family: "IBM Plex Mono";
+  font-family: 'IBM Plex Mono';
  font-style: italic;
  font-weight: 600;
-  src: url("/fonts/ibm-plex-mono-v19-latin-600italic.woff2") format("woff2"); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
+  src: url('/fonts/ibm-plex-mono-v19-latin-600italic.woff2') format('woff2'); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
}

/* ibm-plex-mono-700 - latin */
@font-face {
  font-display: swap; /* Check https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face/font-display for other options. */
-  font-family: "IBM Plex Mono";
+  font-family: 'IBM Plex Mono';
  font-style: normal;
  font-weight: 700;
-  src: url("/fonts/ibm-plex-mono-v19-latin-700.woff2") format("woff2"); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
+  src: url('/fonts/ibm-plex-mono-v19-latin-700.woff2') format('woff2'); /* Chrome 36+, Opera 23+, Firefox 39+, Safari 12+, iOS 10+ */
}

@tailwind base;
@tailwind components;
@tailwind utilities;

@layer base {
  :root,
  html[data-theme="light"] {

View File

@@ -4,14 +4,30 @@
    "baseUrl": "src",
    "jsx": "react-jsx",
    "paths": {
-     "@assets/*": ["assets/*"],
-     "@config": ["config.ts"],
-     "@components/*": ["components/*"],
-     "@content/*": ["content/*"],
-     "@layouts/*": ["layouts/*"],
-     "@pages/*": ["pages/*"],
-     "@styles/*": ["styles/*"],
-     "@utils/*": ["utils/*"]
+     "@assets/*": [
+       "assets/*"
+     ],
+     "@config": [
+       "config.ts"
+     ],
+     "@components/*": [
+       "components/*"
+     ],
+     "@content/*": [
+       "content/*"
+     ],
+     "@layouts/*": [
+       "layouts/*"
+     ],
+     "@pages/*": [
+       "pages/*"
+     ],
+     "@styles/*": [
+       "styles/*"
+     ],
+     "@utils/*": [
+       "utils/*"
+     ]
    }
  }
}