Compare commits

...

12 Commits

31 changed files with 1254 additions and 57 deletions

1
.gitignore vendored
View File

@ -1 +1,2 @@
.DS_Store
secrets/

View File

@ -1,56 +0,0 @@
" install vim-plug
let data_dir = has('nvim') ? stdpath('data') . '/site' : '~/.vim'
if empty(glob(data_dir . '/autoload/plug.vim'))
silent execute '!curl -fLo '.data_dir.'/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim'
autocmd VimEnter * PlugInstall --sync | source $MYVIMRC
endif
" setup plugins
call plug#begin()
Plug 'preservim/nerdtree'
Plug 'ryanoasis/vim-devicons'
Plug 'vim-airline/vim-airline'
Plug 'vim-airline/vim-airline-themes'
Plug 'prabirshrestha/vim-lsp'
Plug 'mattn/vim-lsp-settings'
Plug 'prabirshrestha/asyncomplete.vim'
Plug 'prabirshrestha/asyncomplete-lsp.vim'
call plug#end()
" general settings
syntax on
set tabstop=4
set shiftwidth=4
set expandtab
set number relativenumber
" required for vim-devicons to work properly
set encoding=UTF-8
" vim-airline settings
let g:airline_theme='papercolor'
" NERDTree settings
let g:NERDTreeShowHidden=1
nnoremap <C-t> :NERDTreeToggle<CR>
" set up vim-lsp
let g:lsp_diagnostics_echo_cursor = 1
" set up vim-lsp-settings
let g:lsp_settings= {
\ 'clangd': {
\ 'cmd': ['/opt/homebrew/Cellar/llvm/19.1.6/bin/clangd'],
\ }
\}
" set up autocomplete tabing
inoremap <expr> <Tab> pumvisible() ? "\<C-n>" : "\<Tab>"
inoremap <expr> <S-Tab> pumvisible() ? "\<C-p>" : "\<S-Tab>"
inoremap <expr> <cr> pumvisible() ? asyncomplete#close_popup() : "\<cr>"
" set up automatic braces
inoremap { {<CR>}<Esc>ko
inoremap ( ()<Esc>i
inoremap [ []<Esc>i
inoremap " ""<Esc>i

View File

@ -0,0 +1,22 @@
# NAxS Homelab
## Prerequisites
0. Make sure that your user is part of the docker group
- `cat /etc/group | grep docker` - if the entry looks like `docker:x:<Group ID>:<username>`, you're good to go
- Otherwise please run `sudo usermod -aG docker <username>`, then log out and back in again for the change to take effect
1. Create a default network called homelab
```
docker network create homelab
```
2. Set up 1Password for access to secrets
- Install `pass` & `gpg`
- Generate key with `gpg --full-generate-key`
- stick to defaults
- as password, use `GPG cert password` stored inside the `NAxS Homelab` vault in 1Password
- Initialize password storage with `pass init "GPG key ID"`
- You can check out the ID by using `gpg --list-secret-keys --keyid-format LONG` - you should see a line with `sec`, containing the following information `<encryption technology>/ID`
- Store the 1Password service account token in `pass` as `op-sa_token` by executing `pass insert op-sa_token`
- Make sure your .zshrc file loads the token into the `OP_SERVICE_ACCOUNT_TOKEN` (this is needed by the 1Password CLI for authentication purposes when loading the secrets) environment variable by executing `export OP_SERVICE_ACCOUNT_TOKEN="$(pass op-sa_token)"`
// TODO: Create template script
Template script which helps with setting up new applications (asks for potential secrets needs, adds default network to compose file, creates new users/groups to run containers rootless)

View File

@ -0,0 +1,180 @@
#!/bin/zsh
# Scaffold the directory layout for a new homelab application one level
# above the current working directory: a data/ dir, a compose.yaml, a
# README.md and -- when the app needs secrets -- a start.sh template
# that loads them before bringing the stack up.
#
# Usage: ./create-files+directories.sh --app_name <name>

# --- Parse command line arguments ---
while [[ "$#" -gt 0 ]]
do
    case $1 in
        --app_name) app_name="$2"
        shift;;
        *) echo "Unknown parameter passed: $1"
        exit 1;;
    esac
    shift
done

# Make sure app_name exists and is not empty
if [[ -z "$app_name" ]]; then
    echo "app_name is unset or empty."
    exit 1
fi

# Define the base directory for the new application structure.
# This will be one level up from where this script is executed.
app_base_dir="../${app_name}"
app_data_dir="${app_base_dir}/data"
compose_file="${app_base_dir}/compose.yaml"
readme_file="${app_base_dir}/README.md"
start_script="${app_base_dir}/start.sh"

echo "--- Setting up files and directories for application: ${app_name} ---"
echo "Base directory: ${app_base_dir}"

# 1. Create the application base directory and data directory
echo "Creating directory: ${app_data_dir}"
mkdir -p "${app_data_dir}" || { echo "Error: Failed to create directory ${app_data_dir}"; exit 1; }

# 2. Create compose.yaml and README.md
echo "Creating file: ${compose_file}"
touch "${compose_file}" || { echo "Error: Failed to create file ${compose_file}"; exit 1; }
echo "Creating file: ${readme_file}"
touch "${readme_file}" || { echo "Error: Failed to create file ${readme_file}"; exit 1; }

# 3. Prepopulate README.md with a title-case app name
# (first letter of each word capitalized; simple awk-based conversion)
app_name_title_case=$(echo "$app_name" | awk '{for(i=1;i<=NF;i++){ $i=toupper(substr($i,1,1)) tolower(substr($i,2)) }}1')
echo "Prepopulating ${readme_file}..."
cat > "${readme_file}" << EOF
# ${app_name_title_case}
This is the README file for the **${app_name}** application.
## Overview
Provide a brief description of your application here.
## Setup
Instructions for setting up the application.
EOF

# 4. Ask the user if secrets will be required; normalize the answer into
# a single flag so every later check agrees.
read "requires_secrets?Will this application require secrets (yes/no)? "
requires_secrets=$(echo "$requires_secrets" | tr '[:upper:]' '[:lower:]') # Convert to lowercase
use_secrets=false
if [[ "$requires_secrets" == "yes" || "$requires_secrets" == "y" ]]; then
    use_secrets=true
fi

# 5. Prepopulate compose.yaml based on the secrets requirement.
# The obsolete top-level `version:` key is deliberately omitted in both
# variants -- Compose v2 ignores it and emits a warning.
echo "Prepopulating ${compose_file} based on secrets requirement..."
if [[ "$use_secrets" == true ]]; then
cat > "${compose_file}" << EOF
services:
  ${app_name}:
    image: your-app-image:latest
    container_name: ${app_name}-container
    ports:
      - "8080:8080"
    volumes:
      - ./data:/app/data
    environment:
      # Example of how to pass secrets as environment variables
      # These would typically be loaded from a .env file or a secrets management system
      - DATABASE_URL=\${DATABASE_URL}
      - API_KEY=\${API_KEY}
    # Example of secrets usage with Docker Compose secrets (requires Docker Swarm or specific setup)
    # secrets:
    #   - my_database_password
    #   - my_api_key
    # deploy:
    #   resources:
    #     limits:
    #       cpus: '0.5'
    #       memory: 512M
secrets:
  my_database_password:
    external: true # Assumes secret is created externally (e.g., docker secret create)
  my_api_key:
    external: true
EOF
else
cat > "${compose_file}" << EOF
services:
  ${app_name}:
    image: your-app-image:latest
    container_name: ${app_name}-container
    ports:
      - "8080:8080"
    volumes:
      - ./data:/app/data
    # No secrets configuration needed for this version
    # deploy:
    #   resources:
    #     limits:
    #       cpus: '0.5'
    #       memory: 512M
EOF
fi

# 6. If secrets will be used - create ../<app_name>/start.sh and prepopulate it
if [[ "$use_secrets" == true ]]; then
    echo "Creating and prepopulating ${start_script}..."
cat > "${start_script}" << EOF
#!/bin/zsh
# Example start script for an application using secrets
# This script assumes you have a .env file or similar mechanism
# for loading secrets into the environment before starting Docker Compose.
# --- IMPORTANT: Replace with your actual secret loading mechanism ---
# Example 1: Load from a .env file
# if [[ -f ".env" ]]; then
#     echo "Loading environment variables from .env"
#     source .env
# else
#     echo "Warning: .env file not found. Secrets might be missing."
# fi
# Example 2: Using a secrets management tool (e.g., HashiCorp Vault, AWS Secrets Manager)
# echo "Fetching secrets from Vault..."
# export DATABASE_URL=\$(vault read -field=value secret/myapp/database_url)
# export API_KEY=\$(vault read -field=value secret/myapp/api_key)
# -------------------------------------------------------------------
# Run compose from this script's own directory so the compose.yaml next
# to it is found no matter where start.sh is invoked from. (A path baked
# in at generation time would be relative to the generator's cwd.)
cd "\$(dirname "\$0")" || exit 1
echo "Starting Docker Compose services for ${app_name}..."
docker compose up -d
echo "Application ${app_name} started. Check logs with: docker compose logs -f ${app_name}"
EOF
    chmod +x "${start_script}" # Make the start script executable
fi

# 7. If secrets will be used, append another chapter at the end of the README.md file
if [[ "$use_secrets" == true ]]; then
    echo "Appending 'Secrets Management' chapter to ${readme_file}..."
cat >> "${readme_file}" << EOF
## Secrets Management
This application utilizes secrets for sensitive configurations (e.g., database credentials, API keys).
### Configuration
Secrets are expected to be provided via environment variables or Docker Compose secrets.
Refer to the \`start.sh\` script for an example of how to load these secrets before application startup.
**Example Environment Variables (to be set in your environment or a \`.env\` file):**
\`\`\`
DATABASE_URL="postgres://user:password@host:port/database"
API_KEY="your_super_secret_api_key"
\`\`\`
**For Docker Compose secrets:**
Ensure the necessary Docker secrets are created on your Docker host or Swarm cluster.
\`\`\`bash
docker secret create my_database_password <(echo "your_db_password")
docker secret create my_api_key <(echo "your_api_key")
\`\`\`
Then, update the \`compose.yaml\` to reference these secrets.
EOF
fi

echo "--- Setup complete for ${app_name} ---"
echo "Check the '${app_base_dir}' directory for your new files."

View File

@ -0,0 +1,80 @@
#!/bin/zsh
# Create (or adjust) a dedicated group and user for a homelab
# application so its container can run rootless with a stable UID/GID.
# Both the group and the user get the same numeric id.
#
# Usage: ./create-user.sh --app_name <name> --id <numeric uid/gid>

# --- Parse command line arguments ---
while [[ "$#" -gt 0 ]]
do
    case $1 in
        --app_name) app_name="$2"
        shift;;
        --id) desired_id="$2"
        shift;;
        *) echo "Unknown parameter passed: $1"
        exit 1;;
    esac
    shift
done

# Validate app_name is set -- without this check the script would
# happily create a user literally named "-user" (the sibling
# create-files+directories.sh performs the same validation).
if [[ -z "$app_name" ]]; then
    echo "Error: app_name is unset or empty."
    exit 1
fi

# Validate desired_id is a number
if ! [[ "$desired_id" =~ ^[0-9]+$ ]]; then
    echo "Error: Invalid UID/GID. Please enter a numeric value."
    exit 1
fi

user_name="${app_name}-user"
group_name="${app_name}-group"
echo "--- Checking/Creating User and Group for ${app_name} ---"

# --- Handle Group: create it with the desired GID, or remap an
# existing group whose GID differs ---
echo "Checking group: ${group_name}"
existing_gid=$(getent group "${group_name}" | cut -d: -f3)
if [[ -n "$existing_gid" ]]; then
    if [[ "$existing_gid" -eq "$desired_id" ]]; then
        echo "Group '${group_name}' already exists with the correct GID (${desired_id})."
    else
        echo "Group '${group_name}' exists with GID ${existing_gid}, but desired GID is ${desired_id}."
        echo "Attempting to modify group GID..."
        if sudo groupmod -g "$desired_id" "${group_name}"; then
            echo "Successfully adjusted group '${group_name}' to GID ${desired_id}."
        else
            echo "Failed to adjust group '${group_name}' GID. Please check permissions or try manually."
            exit 1
        fi
    fi
else
    echo "Group '${group_name}' does not exist. Creating..."
    if sudo groupadd -g "$desired_id" "${group_name}"; then
        echo "Successfully created group '${group_name}' with GID ${desired_id}."
    else
        echo "Failed to create group '${group_name}'. Please check permissions or try manually."
        exit 1
    fi
fi

# --- Handle User: same pattern; the user gets a nologin shell since it
# only exists to own files and run containers ---
echo "Checking user: ${user_name}"
existing_uid=$(getent passwd "${user_name}" | cut -d: -f3)
if [[ -n "$existing_uid" ]]; then
    if [[ "$existing_uid" -eq "$desired_id" ]]; then
        echo "User '${user_name}' already exists with the correct UID (${desired_id})."
    else
        echo "User '${user_name}' exists with UID ${existing_uid}, but desired UID is ${desired_id}."
        echo "Attempting to modify user UID..."
        if sudo usermod -u "$desired_id" -g "$desired_id" "${user_name}"; then
            echo "Successfully adjusted user '${user_name}' to UID ${desired_id} and primary GID ${desired_id}."
        else
            echo "Failed to adjust user '${user_name}' UID/GID. Please check permissions or try manually."
            exit 1
        fi
    fi
else
    echo "User '${user_name}' does not exist. Creating..."
    if sudo useradd -u "$desired_id" -g "$desired_id" -s /sbin/nologin -c "Application User for ${app_name}" "${user_name}"; then
        echo "Successfully created user '${user_name}' with UID ${desired_id} and primary GID ${desired_id}."
    else
        echo "Failed to create user '${user_name}'. Please check permissions or try manually."
        exit 1
    fi
fi

echo "--- Operation complete for ${app_name} ---"

View File

@ -0,0 +1,8 @@
#!/bin/zsh
# Interactive wrapper: collects the application name and desired UID/GID,
# then delegates to the user/group and file/directory creation scripts.
# Abort on the first failure so we never scaffold files for a user/group
# that could not be created.
set -e
# Ask for the application name
read "app_name?Enter the name of the application: "
# Ask for the desired UID/GID
read "desired_id?Enter the desired UID/GID for the application (e.g., 1001): "
# Quote the expansions so names containing whitespace are passed as
# single arguments instead of being word-split.
./create-user.sh --app_name "${app_name}" --id "${desired_id}"
./create-files+directories.sh --app_name "${app_name}"

View File

@ -0,0 +1,48 @@
# --- Shared logging snippet (imported by every site block below) ---
(logging) {
log {
output file /var/log/caddy/access.log {
# Roll logs to save space
roll_size 100mb
roll_keep 10
roll_keep_for 720h # 30 days
}
format json
level INFO # This ensures all requests (INFO, WARN, ERROR) are logged
}
}
# --- top domain ---
saljic.me {
import logging
respond "Welcome! In the making..."
}
# --- Home Assistant ---
ha.saljic.me {
import logging
reverse_proxy 10.10.10.6:8123
}
# --- FreshRSS ---
feed.saljic.me {
import logging
reverse_proxy 10.10.10.6:8081
}
# --- Immich ---
tagebuch.saljic.me {
import logging
reverse_proxy 10.10.10.6:2283
}
# --- Gitea ---
git.saljic.me {
import logging
reverse_proxy 10.10.10.6:8030
}
# --- ntfy ---
ntfy.saljic.me {
import logging
reverse_proxy 10.10.10.6:8500
}

View File

@ -0,0 +1,155 @@
{
"admin": {
"listen": "127.0.0.1:2019"
},
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"logs": {
"default_logger_name": "default"
},
"routes": [
{
"handle": [
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "10.10.10.6:8123"
}
]
}
],
"match": [
{
"host": [
"ha.saljic.me"
],
"remote_ip": {
"ranges": [
"217.82.27.57"
]
}
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": "401" }
],
"match": [
{
"host": [
"ha.saljic.me"
],
"not": [
{
"remote_ip": {
"ranges": [
"217.82.27.57"
]
}
}
]
}
]
},
{
"handle": [
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "10.10.10.6:8000"
}
]
}
],
"match": [
{
"host": [
"baby.saljic.me"
],
"remote_ip": {
"ranges": [
"217.82.27.57"
]
}
}
]
},
{ "handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": "401"
}
],
"match": [
{
"host": [
"baby.saljic.me"
],
"not": [
{
"remote_ip": {
"ranges": [
"217.82.27.57"
]
}
}
]
}
]
},
{
"handle": [
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "10.10.10.6:2283"
}
]
}
],
"match": [
{
"host": [
"tagebuch.saljic.me"
]
}
]
}
]
}
}
}
},
"logging": {
"logs": {
"default": {
"encoder": {
"format": "json"
},
"level": "INFO",
"writer": {
"filename": "/var/log/caddy/access.log",
"output": "file",
"roll_gzip": false,
"roll_keep": 5,
"roll_keep_days": 60,
"roll_local_time": false,
"roll_size_mb": 100
}
}
}
}
}

View File

@ -0,0 +1,38 @@
# Gitea
## Prerequisites
### Set up database
- Create database called `gitea` in Postgres
### Set up non-root user for container
We are providing a non-root user to the container to limit the attack surface for privilege escalations. In order for this to work in our setup, please make sure to check if you have a user called `gitea` set up
1. Check whether the user `gitea` exists
```
cat /etc/passwd | grep gitea
```
In case user doesn't exist, please create the user by running
```
sudo useradd gitea
```
2. `data` & `config` folder ownership
Also you need to make sure that the `gitea` owner owns the volumes mounted for docker
```
sudo chown -R gitea:gitea data
sudo chmod 770 data
sudo chown -R gitea:gitea config
sudo chmod 770 config
```
3. Adjust compose.yml
Within `services > gitea > user`, make sure to replace `gitea` with the UID of the user on your machine
```
cat /etc/passwd | grep gitea
```
## Initial setup
1. `docker compose up -d`
2. Open IP:8030 and continue set up

View File

@ -0,0 +1,15 @@
# Gitea (rootless image) attached to the shared external `homelab` network.
services:
gitea:
image: docker.gitea.com/gitea:1-rootless
container_name: gitea
# NOTE(review): per this app's README, "gitea" should be replaced with
# the numeric UID of the host user -- Docker needs a UID unless the
# name resolves inside the image; confirm before deploying.
user: "gitea"
environment:
# Lock down the instance: no self-service account creation.
DISABLE_REGISTRATION: true
restart: always
# data/config must be owned by the host gitea user (see README step 2).
volumes: ['./data:/var/lib/gitea', './config:/etc/gitea']
# 8030 = web UI, 2222 = SSH for git push/pull.
ports: ['8030:3000', '2222:2222']
networks: ['homelab']
networks:
homelab:
external: true

View File

@ -0,0 +1,19 @@
# Loads default set of integrations. Do not remove.
default_config:
# Load frontend themes from the themes folder
frontend:
themes: !include_dir_merge_named themes
automation: !include automations.yaml
script: !include scripts.yaml
scene: !include scenes.yaml
# Trust the reverse proxy so client IPs are taken from X-Forwarded-For.
http:
use_x_forwarded_for: true
trusted_proxies:
- 192.168.100.5
# External URL goes through the reverse proxy; internal URL is direct LAN access.
homeassistant:
external_url: "https://ha.saljic.me"
internal_url: "http://10.10.10.6:8123"
sensor: !include sensor.yaml

View File

@ -0,0 +1,34 @@
# Postgres
## Set up non-root user for container
We are providing a non-root user to the container to limit the attack surface for privilege escalations. In order for this to work in our setup, please make sure to check if you have a user called `postgres` set up.
1. Check if user `postgres` exists
```
cat /etc/passwd | grep postgres
```
In case the `postgres` user doesn't exist, please create the user by running
```
sudo useradd postgres
```
2. `data` folder ownership
Also you need to make sure that the `postgres` owner owns the volumes mounted for docker
```
sudo chown -R postgres:postgres data
sudo chmod 770 data
```
3. Adjust compose.yml
Within `services > postgres > user`, make sure to replace `postgres` with the UID of the user on your machine
```
cat /etc/passwd | grep postgres
```
## About secrets
Because Postgres requires secrets that are managed centrally in 1Password, running `docker compose` directly in the terminal does not work -- the secrets must first be loaded into the environment.
## Bring up/tear down container
Please use the `start.sh` to spin up the container
### Prerequisites start.sh
- User executing the script is part of the `docker` group
- Environment variable `OP_SERVICE_ACCOUNT_TOKEN` is set up \[check out top-level README.md for more information on how to set this up\]

View File

@ -0,0 +1,23 @@
# Postgres with credentials sourced from environment variables (exported
# by start.sh from 1Password) and handed to the container as file-based
# secrets under /run/secrets.
secrets:
postgres_password:
environment: POSTGRES_PASSWORD
postgres_user:
environment: POSTGRES_USER
services:
postgres:
image: postgres:18
container_name: postgres
# Non-root host UID that must own ./data (see README).
user: "1002"
restart: always
# Larger shared memory segment than Docker's default for Postgres.
shm_size: 1024mb
environment:
# *_FILE variants make the image's entrypoint read the credentials
# from the mounted secret files instead of plain env vars.
POSTGRES_USER_FILE: /run/secrets/postgres_user
POSTGRES_PASSWORD_FILE: /run/secrets/postgres_password
secrets: ['postgres_password', 'postgres_user']
ports: ['5432:5432']
volumes: ['./data:/var/lib/postgresql']
networks: ['homelab']
networks:
homelab:
external: true

View File

@ -0,0 +1,13 @@
#!/bin/zsh
# Fetch the Postgres credentials from 1Password and bring the compose
# stack up with them exported in the environment (compose.yaml maps
# them into file-based secrets).
# Requires: 1Password CLI (`op`) with OP_SERVICE_ACCOUNT_TOKEN set.
# Exit immediately if a command exits with a non-zero status.
set -e
echo "--- Starting Docker Secret Management ---"
# Export secrets from 1Password into environment variables; compose
# picks these up via its top-level `secrets:` section.
export POSTGRES_USER="$(op read 'op://NAxS Homelab/Postgres Homelab/username')"
export POSTGRES_PASSWORD="$(op read 'op://NAxS Homelab/Postgres Homelab/password')"
# Bring up container
docker compose up -d
echo "--- Docker Secret Management Complete ---"

View File

@ -0,0 +1 @@
Installation instructions for Bonjour reflection: https://github.com/nberlee/bonjour-reflector/blob/main/docs/RouterOS/README.md

View File

@ -0,0 +1,145 @@
# ARP Table Monitor Script for MikroTik RouterOS
# This script monitors ARP table and sends REST API notifications for new MAC+IP combinations
# NOTE(review): presumably scheduled via /system scheduler -- state persists
# across runs only through the global arpKnownDevices variable; confirm.
# Configuration variables
:local apiEndpoint "http://10.10.10.6:8500/mikrotik-389210-instant"
:local monitorInterfaces {"vlan-guest"; "vlan-iot"; "vlan-home"}
# Create global variable to store known MAC+IP combinations if it doesn't exist
:global arpKnownDevices
:if ([:typeof $arpKnownDevices] = "nothing") do={
:set arpKnownDevices [:toarray ""]
}
# Uncomment while testing, comment for production
# :set arpKnownDevices [:toarray ""]
# Get current ARP entries
:local currentArpEntries [:toarray ""]
:local arpEntries
# Get ARP entries - filter by interface if specified
:if ([:len $monitorInterfaces] > 0) do={
:set arpEntries [:toarray ""]
:foreach iface in=$monitorInterfaces do={
:do {
:local ifaceEntries [/ip arp find interface=$iface]
:foreach entry in=$ifaceEntries do={
:if ([:len $arpEntries] = 0) do={
:set arpEntries $entry
} else={
:set arpEntries ($arpEntries, $entry)
}
}
} on-error={
:log warning "Could not access interface: $iface"
}
}
:if ([:len $arpEntries] > 0) do={
:set arpEntries [:toarray $arpEntries]
}
} else={
# Monitor all interfaces
:set arpEntries [/ip arp find]
}
# Check for new MAC+IP combinations
:local newDevicesFound false
:local requestBody ""
:if ([:len $arpEntries] > 0) do={
:foreach arpEntry in=$arpEntries do={
:local arpInfo [/ip arp get $arpEntry]
# Skip incomplete, invalid, or dynamic entries if desired
:local arpStatus ($arpInfo->"status")
:if ($arpStatus = "reachable" || $arpStatus = "stale" || $arpStatus = "delay") do={
:local macAddress ($arpInfo->"mac-address")
:local ipAddress ($arpInfo->"address")
:local interface ($arpInfo->"interface")
# Create unique identifier for MAC+IP combination
:local deviceId ($macAddress . "-" . $ipAddress)
# Check if this MAC+IP combination is already known
# NOTE(review): arpKnownDevices is stored as one comma-joined string, so
# :find performs a substring match -- a deviceId that is a prefix of a
# longer stored entry could false-positive; confirm this is acceptable.
:local isKnown false
:if ([:len $arpKnownDevices] > 0) do={
:if ([:find $arpKnownDevices $deviceId] >= 0) do={
:set isKnown true
}
}
# If not known, process as new device
:if (!$isKnown) do={
:set newDevicesFound true
# Get additional info
:local arpComment ""
:if ([:typeof ($arpInfo->"comment")] != "nothing") do={
:set arpComment ($arpInfo->"comment")
} else={
:set arpComment "N/A"
}
# Try to get hostname from DHCP lease table (if available)
:local hostname "Unknown"
:do {
:local dhcpLease [/ip dhcp-server lease find address=$ipAddress]
:if ([:len $dhcpLease] > 0) do={
:local leaseInfo [/ip dhcp-server lease get [:pick $dhcpLease 0]]
:if ([:typeof ($leaseInfo->"host-name")] != "nothing") do={
:set hostname ($leaseInfo->"host-name")
}
}
} on-error={
# DHCP lookup failed, keep hostname as "Unknown"
}
# Build notification body
:set requestBody ($requestBody . "MAC+IP Address: " . $deviceId . "\n")
:set requestBody ($requestBody . "Interface: " . $interface . "\n")
:set requestBody ($requestBody . "ARP Status: " . $arpStatus . "\n")
:set requestBody ($requestBody . "Hostname: " . $hostname . "\n")
:set requestBody ($requestBody . "----------------------------------------\n")
# Add to known devices
# (string concatenation -- after the first append the global holds a
# comma-separated string rather than a RouterOS array)
:if ([:len $arpKnownDevices] = 0) do={
:set arpKnownDevices $deviceId
} else={
:set arpKnownDevices ($arpKnownDevices . "," . $deviceId)
}
:log info "New ARP entry detected: $macAddress at $ipAddress on $interface"
}
}
}
}
# Send REST API notification for new devices
:if ($newDevicesFound) do={
:do {
/tool fetch \
url=$apiEndpoint \
http-method=post \
http-header-field="Priority: 3,Title: New device detected in network,Tags: warning" \
http-data=$requestBody \
keep-result=no
:log info "ARP monitoring notification API request sent"
} on-error={
:log error "Failed to send ARP monitoring notification API request"
}
} else={
:log info "No new mac/ip combinations in ARP"
}
# Clean up known devices list if it gets too large
:local knownCount 0
:if ([:len $arpKnownDevices] > 0) do={
:set knownCount [:len [:toarray $arpKnownDevices]]
}
:if ($knownCount > 2000) do={
:log warning "Known ARP devices list is getting large ($knownCount entries), consider resetting it"
# Uncomment the next line to auto-reset when list gets too large
# :global arpKnownDevices ""
}

View File

@ -0,0 +1,39 @@
net_interface = "eth0"
[devices]
[devices."B0:22:7A:91:01:CB"]
description = "HP Printer"
origin_pool = 10
shared_pools = [30]
[devices."48:A6:B8:A5:BC:7E"]
description = "Sonos One #1"
origin_pool = 10
shared_pools = [30]
[devices."48:A6:B8:A5:BB:7C"]
description = "Sonos One #2"
origin_pool = 10
shared_pools = [30]
[devices."48:A6:B8:B0:4C:80"]
description = "Sonos Arc"
origin_pool = 10
shared_pools = [30]
[devices."F0:B3:EC:05:48:3D"]
description = "Apple TV"
origin_pool = 10
shared_pools = [30]
[vlan]
[vlan.10]
ip_source = "10.10.10.2"
[vlan.20]
ip_source = "10.10.20.2"
[vlan.30]
ip_source = "10.10.30.2"

15
homelab/mikrotik/mqtt.rsc Normal file
View File

@ -0,0 +1,15 @@
# Collect router health metrics (CPU load, RAM utilization, CPU and SFP
# temperatures, PPPoE session uptime) and publish them as one JSON
# payload to the "mikrotik-info" MQTT topic on the "mosquitto" broker.
:local cpuLoad [/system/resource/get cpu-load]
:local cpuTemperature [/system health get [find name="temperature"] value]
:local sfp1Info [/interface ethernet monitor sfp-sfpplus1 once as-value]
:local sfpTemperature ($sfp1Info->"sfp-temperature")
:local pppoeClientInfo [/interface pppoe-client monitor [find name="pppoe-telekom"] once as-value]
:local pppoeUptime ($pppoeClientInfo->"uptime")
:local totalRAM [/system/resource/get total-memory]
:local freeRAM [/system/resource/get free-memory]
:local usedRAM ($totalRAM - $freeRAM)
# Integer arithmetic: multiply before dividing to keep the percentage.
:local ramUtilization (($usedRAM * 100) / $totalRAM)
/iot mqtt publish message="{\"cpu\": \"$cpuLoad\", \"ram\": \"$ramUtilization\", \"cpu_temp\": \"$cpuTemperature\", \"sfp_temp\": \"$sfpTemperature\", \"pppoe_telekom_uptime\": \"$pppoeUptime\"}" broker="mosquitto" topic="mikrotik-info"

View File

@ -0,0 +1,104 @@
#!/usr/bin/env bash
set -euo pipefail
# Simple installer for Ubuntu server:
# - unattended-upgrades (security updates + automatic reboot)
# - Docker (engine + compose plugin) per Docker docs steps 1-3
# - zsh (set as default shell for original user)
# - 1Password CLI for access to secrets
# - pass + gnupg2 for storing tokens needed (i.e. for 1Password CLI)
#
# All installs use `apt-get install -y`: under `set -euo pipefail` an
# interactive apt confirmation prompt would hang or abort the script.
# Must be run as root
if [ "$EUID" -ne 0 ]; then
    echo "Please run as root: sudo bash $0"
    exit 1
fi
# Detect target user to set default shell for
TARGET_USER="${SUDO_USER:-$(whoami)}"
apt-get update
# 1) Enable automatic security updates and automatic reboot
apt-get install -y unattended-upgrades
# Enable periodic updates/unattended-upgrades
cat > /etc/apt/apt.conf.d/20auto-upgrades <<'EOF'
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Unattended-Upgrade "1";
APT::Periodic::AutocleanInterval "7";
EOF
# Ensure automatic reboot after unattended-upgrades (time adjustable)
cat > /etc/apt/apt.conf.d/99auto-reboot <<'EOF'
Unattended-Upgrade::Automatic-Reboot "true";
Unattended-Upgrade::Automatic-Reboot-Time "04:00";
EOF
# Start/enable unattended-upgrades (if system uses service/timer)
if systemctl list-unit-files | grep -q unattended-upgrades; then
    systemctl enable --now unattended-upgrades || true
fi
# 2) Install Docker (steps 1-3 from Docker docs)
# Install prerequisites
apt-get install -y ca-certificates curl gnupg lsb-release
# Create keyrings dir and add Docker GPG key
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
chmod a+r /etc/apt/keyrings/docker.asc
# Add Docker apt repository
ARCH=$(dpkg --print-architecture)
. /etc/os-release
UBU_CODENAME="${UBUNTU_CODENAME:-$VERSION_CODENAME}"
echo "deb [arch=${ARCH} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu ${UBU_CODENAME} stable" \
    > /etc/apt/sources.list.d/docker.list
apt-get update
# Install Docker Engine + plugins including compose plugin
apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
# Verify Docker works by running hello-world (this will pull an image)
if command -v docker >/dev/null 2>&1; then
    docker run --rm hello-world || true
fi
# 3) Install zsh and make it the default shell for the target user
apt-get install -y zsh
ZSH_PATH="$(which zsh)"
if ! grep -q "^${ZSH_PATH}$" /etc/shells; then
    echo "${ZSH_PATH}" >> /etc/shells
fi
# Change shell for target user (if possible)
if id "${TARGET_USER}" >/dev/null 2>&1; then
    chsh -s "${ZSH_PATH}" "${TARGET_USER}" || echo "chsh failed for ${TARGET_USER}; you may need to run 'chsh -s ${ZSH_PATH} ${TARGET_USER}' manually"
else
    echo "User ${TARGET_USER} not found; skipping chsh"
fi
# 4) Install 1Password CLI for access to secrets
curl -sS https://downloads.1password.com/linux/keys/1password.asc | \
    gpg --dearmor --output /usr/share/keyrings/1password-archive-keyring.gpg && \
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/1password-archive-keyring.gpg] https://downloads.1password.com/linux/debian/$(dpkg --print-architecture) stable main" | \
    tee /etc/apt/sources.list.d/1password.list && \
mkdir -p /etc/debsig/policies/AC2D62742012EA22/ && \
curl -sS https://downloads.1password.com/linux/debian/debsig/1password.pol | \
    tee /etc/debsig/policies/AC2D62742012EA22/1password.pol && \
mkdir -p /usr/share/debsig/keyrings/AC2D62742012EA22 && \
curl -sS https://downloads.1password.com/linux/keys/1password.asc | \
    gpg --dearmor --output /usr/share/debsig/keyrings/AC2D62742012EA22/debsig.gpg && \
apt-get update && apt-get install -y 1password-cli
# Check successful install
op --version
# 5) Install pass + gnupg2 for securely storing tokens
apt-get install -y pass gnupg2
echo "Done. Recommended: log out and back in (or reboot) to start using zsh and ensure all services are active."

86
homelab/vps/wg_monitor.sh Normal file
View File

@ -0,0 +1,86 @@
#!/bin/bash
# Monitor reachability of the homelab host over the WireGuard tunnel and
# restart the wg0 interface when all ping replies are lost.
# IP address to ping
IP="10.10.10.6"
# Number of ping attempts
COUNT=5
# Log file
LOG_FILE="/var/log/wg_monitor.log"
# Function to log messages with timestamp
log_message() {
# Create log file if it doesn't exist and set proper permissions
if [ ! -f "$LOG_FILE" ]; then
touch "$LOG_FILE"
chmod 644 "$LOG_FILE"
fi
local message="$(date '+%Y-%m-%d %H:%M:%S') - $1"
echo "$message" >> "$LOG_FILE"
echo "$message" # Also print to console for debugging
}
# Always log script start
log_message "Script started - pinging $IP"
# Ping the IP address (-W 5: wait at most 5s per reply)
echo "Executing ping command..."
ping_result=$(ping -c $COUNT -W 5 $IP 2>&1)
exit_code=$?
echo "Ping exit code: $exit_code"
echo "Ping result:"
echo "$ping_result"
# Check if ping failed completely
# NOTE(review): with iputils ping, exit code 1 means no reply was
# received at all (see ping(8)), so the 100%-loss check below is a
# belt-and-braces confirmation rather than a separate case.
if [ $exit_code -eq 1 ]; then
log_message "Ping failed with exit code 1"
# Extract packet loss percentage from the ping summary line
packet_loss=$(echo "$ping_result" | grep -o '[0-9]*% packet loss' | grep -o '[0-9]*')
echo "Packet loss: $packet_loss%"
log_message "Packet loss detected: $packet_loss%"
# If 100% packet loss, restart WireGuard
if [ "$packet_loss" = "100" ]; then
log_message "100% packet loss detected for $IP. Restarting WireGuard interface wg0..."
# Stop WireGuard interface
echo "Stopping WireGuard..."
/usr/bin/wg-quick down wg0
down_result=$?
if [ $down_result -eq 0 ]; then
log_message "WireGuard interface wg0 stopped successfully"
else
log_message "Failed to stop WireGuard interface wg0 (exit code: $down_result)"
fi
sleep 2
# Start WireGuard interface
echo "Starting WireGuard..."
/usr/bin/wg-quick up wg0
up_result=$?
if [ $up_result -eq 0 ]; then
log_message "WireGuard interface wg0 started successfully"
else
log_message "Failed to start WireGuard interface wg0 (exit code: $up_result)"
fi
else
log_message "Ping failed but not 100% packet loss ($packet_loss%)"
fi
elif [ $exit_code -eq 0 ]; then
# Ping successful
log_message "Ping to $IP successful"
echo "Ping successful"
else
# Exit code 2+: name resolution / option / network errors
log_message "Ping command failed with exit code $exit_code"
echo "Ping failed with exit code $exit_code"
fi
log_message "Script completed"
echo "Script completed"

View File

@ -0,0 +1,29 @@
function run(argv) {
    // JXA entry point (osascript -l JavaScript): reads the task title and
    // optional notes from environment variables and hands a transport-text
    // line to OmniFocus via its omnijs-run URL scheme.
    const env = $.NSProcessInfo.processInfo.environment;
    const title = env.objectForKey("title").js;
    const notes = env.objectForKey("notes").js;

    // Never executed here: this function is stringified and sent to
    // OmniFocus, where it runs against OmniFocus' globals (Task, URL).
    function addToOmnifocus(transportText) {
        // Declare locals explicitly -- the original leaked `newTasks` and
        // `taskID` as implicit globals, which throws in strict-mode omniJS.
        const newTasks = Task.byParsingTransportText(transportText, true);
        const taskID = newTasks[0].id.primaryKey;
        URL.fromString("omnifocus:///task/" + taskID).open();
    }

    // Builds "<title> @work[ // <notes>]" transport text.
    function generateTransportText(title, notes) {
        const tag = 'work';
        let transportText = `${title} @${tag}`;
        if (notes) {
            transportText = `${transportText} // ${notes}`;
        }
        return transportText;
    }

    const transportText = generateTransportText(title, notes);
    // %28...%29%28argument%29 wraps the stringified function in an IIFE;
    // the transport text is passed in (quoted) via the `arg` parameter.
    const encodedFunctionAndInput = `%28${encodeURIComponent(addToOmnifocus.toString())}%29%28argument%29&arg=%22${encodeURIComponent(transportText)}%22`;
    const omnifocusUrl = `omnifocus://localhost/omnijs-run?script=${encodedFunctionAndInput}`;
    console.log(omnifocusUrl);

    const app = Application.currentApplication();
    app.includeStandardAdditions = true;
    app.openLocation(omnifocusUrl);
}

Binary file not shown.

View File

@ -275,6 +275,7 @@
"Green Component" : 0.99263292551040649
},
"Flashing Bell" : false,
"Show Status Bar" : true,
"Use Italic Font" : true,
"Tab Color (Light)" : {
"Red Component" : 0.23137256503105164,
@ -560,6 +561,107 @@
},
"Blur" : false,
"Use Separate Colors for Light and Dark Mode" : true,
"Status Bar Layout" : {
"components" : [
{
"class" : "iTermStatusBarGitComponent",
"configuration" : {
"knobs" : {
"maxwidth" : 1.7976931348623157e+308,
"iTermStatusBarGitComponentPollingIntervalKey" : 2,
"base: priority" : 5,
"shared text color" : {
"Red Component" : 0.86000317335128784,
"Color Space" : "P3",
"Blue Component" : 0.63590961694717407,
"Alpha Component" : 1,
"Green Component" : 0.6414334774017334
},
"base: compression resistance" : 1,
"minwidth" : 0
},
"layout advanced configuration dictionary value" : {
"remove empty components" : false,
"font" : ".SFNS-Regular 12",
"algorithm" : 0,
"auto-rainbow style" : 3
}
}
},
{
"class" : "iTermStatusBarCPUUtilizationComponent",
"configuration" : {
"knobs" : {
"base: priority" : 5,
"base: compression resistance" : 1,
"shared text color" : {
"Red Component" : 0.72450786828994751,
"Color Space" : "P3",
"Blue Component" : 0.65540963411331177,
"Alpha Component" : 1,
"Green Component" : 0.89377599954605103
}
},
"layout advanced configuration dictionary value" : {
"remove empty components" : false,
"font" : ".SFNS-Regular 12",
"algorithm" : 0,
"auto-rainbow style" : 3
}
}
},
{
"class" : "iTermStatusBarMemoryUtilizationComponent",
"configuration" : {
"knobs" : {
"base: priority" : 5,
"base: compression resistance" : 1,
"shared text color" : {
"Red Component" : 0.64865356683731079,
"Color Space" : "P3",
"Blue Component" : 0.8851432204246521,
"Alpha Component" : 1,
"Green Component" : 0.72423571348190308
}
},
"layout advanced configuration dictionary value" : {
"remove empty components" : false,
"font" : ".SFNS-Regular 12",
"algorithm" : 0,
"auto-rainbow style" : 3
}
}
},
{
"class" : "iTermStatusBarNetworkUtilizationComponent",
"configuration" : {
"knobs" : {
"base: priority" : 5,
"base: compression resistance" : 1,
"shared text color" : {
"Red Component" : 0.86000508069992065,
"Color Space" : "P3",
"Blue Component" : 0.76881867647171021,
"Alpha Component" : 1,
"Green Component" : 0.64143049716949463
}
},
"layout advanced configuration dictionary value" : {
"remove empty components" : false,
"font" : ".SFNS-Regular 12",
"algorithm" : 0,
"auto-rainbow style" : 3
}
}
}
],
"advanced configuration" : {
"remove empty components" : false,
"font" : ".SFNS-Regular 12",
"algorithm" : 0,
"auto-rainbow style" : 3
}
},
"Background Color (Light)" : {
"Red Component" : 0.89803922176361084,
"Color Space" : "sRGB",

85
working_machine/vimrc Normal file
View File

@ -0,0 +1,85 @@
" Bootstrap vim-plug: if plug.vim is missing, download it (network access via
" curl on first launch) and install all declared plugins on startup.
let data_dir = has('nvim') ? stdpath('data') . '/site' : '~/.vim'
if empty(glob(data_dir . '/autoload/plug.vim'))
silent execute '!curl -fLo '.data_dir.'/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim'
autocmd VimEnter * PlugInstall --sync | source $MYVIMRC
endif
" Plugins managed by vim-plug: file tree + icons, statusline, LSP client with
" auto server installs, and async completion wired to the LSP.
call plug#begin()
Plug 'preservim/nerdtree'
Plug 'ryanoasis/vim-devicons'
Plug 'vim-airline/vim-airline'
Plug 'vim-airline/vim-airline-themes'
Plug 'prabirshrestha/vim-lsp'
Plug 'mattn/vim-lsp-settings'
Plug 'prabirshrestha/asyncomplete.vim'
Plug 'prabirshrestha/asyncomplete-lsp.vim'
call plug#end()
" General editing settings: 4-space indentation with spaces, hybrid
" (absolute + relative) line numbers.
syntax on
set tabstop=4
set shiftwidth=4
set expandtab
set number relativenumber
" Required for vim-devicons to render properly (icon glyphs are multibyte)
set encoding=UTF-8
" vim-airline: use the papercolor statusline theme
let g:airline_theme='papercolor'
" NERDTree: show dotfiles in the tree; Ctrl-T toggles the tree window
let g:NERDTreeShowHidden=1
nnoremap <C-t> :NERDTreeToggle<CR>
" vim-lsp: echo the diagnostic for the line under the cursor
let g:lsp_diagnostics_echo_cursor = 1
" vim-lsp-settings: point clangd at the Homebrew LLVM binary.
" NOTE(review): this path pins LLVM 19.1.6 — it will break after
" `brew upgrade llvm`; consider the unversioned /opt/homebrew/opt/llvm path.
let g:lsp_settings= {
\ 'clangd': {
\ 'cmd': ['/opt/homebrew/Cellar/llvm/19.1.6/bin/clangd'],
\ }
\}
" Autocomplete tabbing: Tab/S-Tab cycle the completion popup, Enter accepts
inoremap <expr> <Tab> pumvisible() ? "\<C-n>" : "\<Tab>"
inoremap <expr> <S-Tab> pumvisible() ? "\<C-p>" : "\<S-Tab>"
inoremap <expr> <cr> pumvisible() ? asyncomplete#close_popup() : "\<cr>"
" Automatic pairing of braces, parens, brackets and double quotes in insert
" mode; { also opens an indented block with the cursor on the middle line
inoremap { {<CR>}<Esc>ko
inoremap ( ()<Esc>i
inoremap [ []<Esc>i
inoremap " ""<Esc>i
" Terminal integration for screen/tmux only (skipped in GUI Vim): mouse
" reporting, true color, bracketed paste, focus events, modified arrow keys
if !has('gui_running') && &term =~ '^\%(screen\|tmux\)'
" Better mouse support, see :help 'ttymouse'
set ttymouse=sgr
" Enable true colors, see :help xterm-true-color
let &termguicolors = v:true
let &t_8f = "\<Esc>[38;2;%lu;%lu;%lum"
let &t_8b = "\<Esc>[48;2;%lu;%lu;%lum"
" Enable bracketed paste mode, see :help xterm-bracketed-paste
let &t_BE = "\<Esc>[?2004h"
let &t_BD = "\<Esc>[?2004l"
let &t_PS = "\<Esc>[200~"
let &t_PE = "\<Esc>[201~"
" Enable focus event tracking, see :help xterm-focus-event
let &t_fe = "\<Esc>[?1004h"
let &t_fd = "\<Esc>[?1004l"
execute "set <FocusGained>=\<Esc>[I"
execute "set <FocusLost>=\<Esc>[O"
" Enable modified arrow keys, see :help arrow_modifiers
execute "silent! set <xUp>=\<Esc>[@;*A"
execute "silent! set <xDown>=\<Esc>[@;*B"
execute "silent! set <xRight>=\<Esc>[@;*C"
execute "silent! set <xLeft>=\<Esc>[@;*D"
endif

View File

@ -0,0 +1,5 @@
# ZSH setup
1. Place .zshrc file in home directory
2. Create a folder called `zshrc` in your home directory
3. Place all needed .sh files inside the `zshrc` folder
4. To activate the changes right away, make sure to execute `source .zshrc` from within the home directory in your terminal

View File

@ -0,0 +1 @@
# Export the 1Password service-account token for this shell session, fetching
# it from the pass(1) password store so the secret never lives in the dotfiles.
export OP_SERVICE_ACCOUNT_TOKEN="$(pass op-sa_token)"

View File

@ -0,0 +1,2 @@
# Use vi-style key bindings on the zsh command line.
bindkey -v
# Prompt: "user@host cwd $" (%n user, %m short hostname, %~ cwd with ~ abbrev).
PS1='%n@%m %~ $ '

View File

@ -0,0 +1,3 @@
# Source every snippet in ~/zshrc so shell configuration can be split across
# small per-topic files (see the ZSH setup notes in this repo).
for FILE in ~/zshrc/*; do
    # FIX: quote the expansion so paths containing spaces still source
    # correctly, and skip anything that is not a regular file (a stray
    # subdirectory would previously abort startup with a source error).
    [ -f "$FILE" ] && source "$FILE"
done