Compare commits

..

1 commit

Author SHA1 Message Date
sid
951182f6df initial commit
All checks were successful
Flake check / flake-check (pull_request) Successful in 4m23s
2026-03-30 12:35:50 +02:00
6 changed files with 210 additions and 21 deletions

View file

@@ -27,7 +27,7 @@ Add this repo to your flake inputs:
inputs.synix.url = "git+https://git.sid.ovh/sid/synix.git"; inputs.synix.url = "git+https://git.sid.ovh/sid/synix.git";
``` ```
See the [documentation](https://doc.sid.ovh) for a full setup guide. See the [documentation](https://doc.sid.ovh/synix) for a full setup guide.
## Templates ## Templates

View file

@@ -3,30 +3,15 @@
let let
cfg = config.wayland.windowManager.hyprland; cfg = config.wayland.windowManager.hyprland;
nonFloatingClasses = [ inherit (builtins) toString;
"Gimp"
"steam"
"KiCad"
];
nonFloatingClassesRegex = concatStringsSep "|" nonFloatingClasses;
inherit (builtins) concatStringsSep toString;
inherit (lib) mkDefault; inherit (lib) mkDefault;
in in
{ {
# Do not add binds here. Use `./binds/default.nix` instead. # Do not add binds here. Use `./binds/default.nix` instead.
"$mod" = cfg.modifier; "$mod" = cfg.modifier;
exec-once = [
"dbus-update-activation-environment --systemd WAYLAND_DISPLAY XDG_CURRENT_DESKTOP" # dbus package comes from NixOS module option `services.dbus.dbusPackage` [1]
"gnome-keyring-daemon --start --components=secrets" # gnome-keyring package comes from NixOS module `services.gnome.gnome-keyring` [1]
];
# 1: see Hyprland NixOS module
windowrule = [ windowrule = [
# "float, class:^(${nonFloatingClassesRegex})$" "center, floating:1, not class:^(Gimp)$, not class:^(steam)$"
"center, floating:1, class:^(?!.*(${nonFloatingClassesRegex})).*$"
"float, title:^(Open|Save) Files?$" "float, title:^(Open|Save) Files?$"
"noborder, onworkspace:w[t1]" "noborder, onworkspace:w[t1]"
"bordersize ${toString cfg.settings.general.border_size}, floating:1" "bordersize ${toString cfg.settings.general.border_size}, floating:1"

View file

@@ -33,4 +33,5 @@
virtualisation = import ./virtualisation; virtualisation = import ./virtualisation;
webPage = import ./webPage; webPage = import ./webPage;
windows-oci = import ./windows-oci; windows-oci = import ./windows-oci;
zfs = import ./zfs;
} }

View file

@@ -8,14 +8,11 @@ in
programs.dconf.enable = true; # fixes nixvim hm module programs.dconf.enable = true; # fixes nixvim hm module
services.flatpak.enable = true;
xdg.portal = { xdg.portal = {
enable = true; enable = true;
extraPortals = with pkgs; [ extraPortals = with pkgs; [
xdg-desktop-portal-gtk xdg-desktop-portal-gtk
]; ];
config.common.default = "gtk";
}; };
services.gnome.gnome-keyring.enable = true; services.gnome.gnome-keyring.enable = true;

View file

@@ -0,0 +1,55 @@
{
  config,
  pkgs,
  lib,
  ...
}:
# NixOS ZFS base module: boot/GRUB support plus default trim, scrub,
# auto-snapshot, and replication settings (all overridable via mkDefault).
#
# Manual per-host steps:
# Set `networking.hostId` to:
# $ head -c 8 /etc/machine-id
# Mark datasets to snapshot:
# $ sudo zfs set com.sun:auto-snapshot:daily=true dpool/data/backup
# Generate SSH key for replication (empty passphrase):
# $ sudo -i ssh-keygen -t rsa -b 4096 -f /root/.ssh/zfs-replication
let
  # NOTE(review): the previous `cfg = config.services.zfs;` binding was never
  # referenced anywhere in this module, so it has been removed.
  inherit (lib) mkDefault mkForce;
in
{
  boot.supportedFilesystems = [ "zfs" ];
  # systemd-boot cannot boot from ZFS datasets; force GRUB with ZFS support.
  boot.loader.systemd-boot.enable = mkForce false;
  boot.loader.grub.enable = mkForce true;
  boot.loader.grub.zfsSupport = mkForce true;
  # Periodic TRIM for SSD-backed pools.
  services.zfs.trim = {
    enable = mkDefault true;
    interval = mkDefault "weekly";
  };
  # Periodic integrity scrub of all pools.
  services.zfs.scrub = {
    enable = mkDefault true;
    interval = mkDefault "monthly";
  };
  # Automatic snapshots: keep 24 hourly, 7 daily, 4 weekly; no frequent/monthly.
  # Flags: -k keep zero-sized snapshots, -p prefix names, --utc timestamps.
  services.zfs.autoSnapshot = {
    enable = mkDefault true;
    flags = mkDefault "-k -p --utc";
    frequent = mkDefault 0;
    hourly = mkDefault 24;
    daily = mkDefault 7;
    weekly = mkDefault 4;
    monthly = mkDefault 0;
  };
  # Snapshot replication over SSH using the key generated above.
  services.zfs.autoReplication = {
    username = mkDefault "root";
    identityFilePath = mkDefault "/root/.ssh/zfs-replication";
    followDelete = mkDefault true;
  };
  # lz4 userspace tool, matching the pools' lz4 compression.
  environment.systemPackages = with pkgs; [ lz4 ];
}

151
modules/nixos/zfs/disks.sh Normal file
View file

@@ -0,0 +1,151 @@
#!/usr/bin/env bash
# Provision disks for a mirrored ZFS root pool (rpool) and a raidz data pool
# (dpool). DESTRUCTIVE: wipes every disk listed below.
# Fill in stable /dev/disk/by-id paths and dataset names before running.
# Abort on errors, unset variables, and pipeline failures — essential for a
# script that wipes disks.
set -euo pipefail
# SSDs that form the mirrored root pool.
declare -a SSDs=(
  # '/dev/disk/by-id/abc123'
)
# HDDs that form the raidz data pool.
declare -a HDDs=(
  # '/dev/disk/by-id/def456'
)
# Datasets to create under dpool/data.
declare -a DATA_DATASETS=(
  # 'dataset'
)
MNT='/mnt'  # installation mount root
SWAP_GB=32  # swap partition size per SSD, in GiB
# Block until the given device node appears, polling once per second.
wait_for_device() {
  local dev="$1"
  echo "Waiting for device: $dev ..."
  until [[ -e "$dev" ]]; do
    sleep 1
  done
  echo "Device $dev is ready."
}
# Install a package from nixpkgs via nix-env when the given command is absent.
install_if_missing() {
  local binary="$1" pkg="$2"
  # Nothing to do when the command is already on PATH.
  if command -v "$binary" > /dev/null 2>&1; then
    return 0
  fi
  echo "$binary not found, installing $pkg..."
  nix-env -iA "nixos.$pkg"
}
install_if_missing "sgdisk" "gptfdisk"
install_if_missing "partprobe" "parted"
# Ensure swap parts are off
swapoff --all
udevadm settle
### SSDs ###
echo "Setting up SSDs..."
# Wait for SSD devices to be ready
for ssd in "${SSDs[@]}"; do
wait_for_device "$ssd"
done
# Wipe and partition SSDs
for i in "${!SSDs[@]}"; do
ssd="${SSDs[$i]}"
ssd_num=$((i + 1))
echo "Processing SSD $ssd_num: $ssd"
# Wipe filesystems
wipefs -a "$ssd"
# Clear part tables
sgdisk --zap-all "$ssd"
# Partition disk
sgdisk -n1:1M:+1G -t1:EF00 -c1:BOOT$ssd_num "$ssd"
sgdisk -n2:0:+"$SWAP_GB"G -t2:8200 -c2:SWAP$ssd_num "$ssd"
sgdisk -n3:0:0 -t3:BF00 -c3:ROOT$ssd_num "$ssd"
partprobe -s "$ssd"
udevadm settle
wait_for_device "${ssd}-part3"
done
# Create root pool
echo "Creating root pool..."
zpool create -f -o ashift=12 -o autotrim=on -R "$MNT" -O acltype=posixacl -O canmount=off -O dnodesize=auto -O normalization=formD -O relatime=on -O xattr=sa -O mountpoint=none rpool mirror "${SSDs[0]}"-part3 "${SSDs[1]}"-part3
# Create and mount root system container
zfs create -o canmount=noauto -o mountpoint=legacy rpool/root
mount -o X-mount.mkdir -t zfs rpool/root "$MNT"
# Create root datasets
declare -a ROOT_DATASETS=('home' 'nix' 'tmp' 'var')
for dataset in "${ROOT_DATASETS[@]}"; do
zfs create -o mountpoint=legacy "rpool/$dataset"
mount -o X-mount.mkdir -t zfs "rpool/$dataset" "$MNT/$dataset"
done
# Format boot and swap partitions
for i in "${!SSDs[@]}"; do
ssd="${SSDs[$i]}"
ssd_num=$((i + 1))
mkfs.vfat -F 32 -n BOOT$ssd_num "${ssd}"-part1
mkswap -L SWAP$ssd_num "${ssd}"-part2
swapon -L SWAP$ssd_num
done
# Mount first boot partition
mount -t vfat -o fmask=0077,dmask=0077,iocharset=iso8859-1,X-mount.mkdir -L BOOT1 "$MNT"/boot
### HDDs ###
echo "Setting up HDDs..."
# Wait for HDD devices to be ready
for hdd in "${HDDs[@]}"; do
wait_for_device "$hdd"
done
# Wipe and partition HDDs
for i in "${!HDDs[@]}"; do
hdd="${HDDs[$i]}"
hdd_num=$((i + 1))
echo "Processing HDD $hdd_num: $hdd"
wipefs -a "$hdd"
sgdisk --zap-all "$hdd"
sgdisk -n1:0:0 -t1:BF00 -c1:DATA$hdd_num "$hdd"
done
udevadm settle
# Wait for all HDD partitions to appear
for hdd in "${HDDs[@]}"; do
wait_for_device "${hdd}-part1"
done
# Create data pool
echo "Creating data pool..."
mkdir -p "$MNT"/data
hdd_partitions=()
for hdd in "${HDDs[@]}"; do
hdd_partitions+=("${hdd}-part1")
done
zpool create -f -o ashift=12 -o autotrim=on -R "$MNT" -O acltype=posixacl -O xattr=sa -O dnodesize=auto -O compression=lz4 -O normalization=formD -O relatime=on -O mountpoint=none dpool raidz "${hdd_partitions[@]}"
# Create and mount data root container
zfs create -o canmount=noauto -o mountpoint=legacy dpool/data
mount -o X-mount.mkdir -t zfs dpool/data "$MNT"/data
# Create and mount data datasets
for dataset in "${DATA_DATASETS[@]}"; do
zfs create -o mountpoint=legacy "dpool/data/$dataset"
mount -o X-mount.mkdir -t zfs "dpool/data/$dataset" "$MNT/data/$dataset"
done
echo "Setup complete."