# skyworks-Nix-infra/hosts/skydick/datapool.nix
# Seagate Mach2 SAS dual-actuator data pool ("dick")
#
# Each Mach2 drive presents TWO LUNs over SAS. CRITICAL: never place both
# LUNs of the same physical drive in the same mirror vdev — a single drive
# failure would kill the entire vdev.
#
# SAS WWN identification:
#   LUN0: wwn-0x6000c500XXXXXXXX0000000000000000
#   LUN1: wwn-0x6000c500XXXXXXXX0001000000000000
#   Same prefix = same physical drive. Pair DIFFERENT drives in each mirror.
#
# Layout (4 active + 1 hot spare, all mirrors for expandability):
#
#   mirror-0:  drive1-LUN0  drive2-LUN0
#   mirror-1:  drive1-LUN1  drive2-LUN1
#   mirror-2:  drive3-LUN0  drive4-LUN0
#   mirror-3:  drive3-LUN1  drive4-LUN1
#   spare:     drive5-LUN0  drive5-LUN1
#
# If drive1 fails: mirror-0 and mirror-1 each lose one member, but stay
# online via drive2. The spare auto-replaces one degraded member.
#
# === Pool creation (fill in actual WWNs) ===
#
#   zpool create -o ashift=12 -o autotrim=on -o failmode=continue \
#     -O compression=zstd -O relatime=on \
#     -O xattr=sa -O acltype=posixacl -O dnodesize=auto \
#     -O normalization=formD -O redundant_metadata=most \
#     -O mountpoint=none -O canmount=off \
#     dick \
#     mirror <drive1-LUN0> <drive2-LUN0> \
#     mirror <drive1-LUN1> <drive2-LUN1> \
#     mirror <drive3-LUN0> <drive4-LUN0> \
#     mirror <drive3-LUN1> <drive4-LUN1> \
#     spare <drive5-LUN0> <drive5-LUN1>
#
# === Dataset creation ===
#
#   zfs create -o mountpoint=/srv/share   -o recordsize=128K                       dick/share
#   zfs create -o mountpoint=/srv/media   -o recordsize=1M   -o compression=off    dick/media
#   zfs create -o mountpoint=/srv/backup  -o recordsize=1M   -o compression=zstd-3 dick/backup
#   zfs create -o mountpoint=/srv/torrent -o recordsize=1M                         dick/torrent
#   zfs create -o mountpoint=/srv/vm      -o recordsize=64K                        dick/vm
#
#   # Set permissions after creation (persisted in ZFS):
#   for d in share media torrent; do chown root:storage /srv/$d && chmod 2775 /srv/$d; done
#   for d in backup vm; do chown root:root /srv/$d && chmod 0700 /srv/$d; done
#
# Dataset rationale:
#   share   — general multi-user storage, default recordsize
#   media   — large media files, 1M for sequential throughput, compression off (pre-compressed)
#   backup  — archival backups, 1M records, zstd-3 for better compression ratio
#   torrent — bittorrent download/seed, 1M records (clients write sequentially per file)
#   vm      — iSCSI zvols for live VMs + backup images, 64K aligns with guest block sizes
#             Create zvols: zfs create -V <size> -o volblocksize=16K dick/vm/<name>
#
# === Expanding the pool ===
#
# Add another pair of Mach2 drives (drive6 + drive7):
#   zpool add dick \
#     mirror <drive6-LUN0> <drive7-LUN0> \
#     mirror <drive6-LUN1> <drive7-LUN1>
#
# === Permission model ===
#
# User-facing datasets (share, media, torrent):
#   root:storage 2775 (setgid — new files inherit storage group)
#   NFS: root_squash, Samba: @storage group
#
# System datasets (backup, vm):
#   root:root 0700
#   NFS: no_root_squash, iSCSI: vm zvols

{ config, pkgs, ... }:

{
  # Group granting read/write access to the user-facing datasets
  # (/srv/share, /srv/media, /srv/torrent) over NFS and Samba.
  users.groups.storage = {};

  # Import the "dick" pool at boot. Its datasets use ZFS-native
  # mountpoints (not `fileSystems` entries), so nothing else in this
  # module triggers `zpool import dick`.
  # NOTE(review): harmless if the pool is also imported elsewhere
  # (e.g. hardware-configuration.nix) — confirm it isn't duplicated.
  boot.zfs.extraPools = [ "dick" ];

  # Mountpoint skeleton under /srv. ZFS mounts the datasets on top of
  # these directories; modes/ownership match the chown/chmod commands in
  # the header so an unmounted pool does not expose wrongly-permissioned
  # stub directories.
  systemd.tmpfiles.rules = [
    "d /srv 0755 root root -"
    "d /srv/share 2775 root storage -"   # setgid: new files inherit the storage group
    "d /srv/media 2775 root storage -"
    "d /srv/backup 0700 root root -"     # system-only (archival backups)
    "d /srv/torrent 2775 root storage -"
    "d /srv/vm 0700 root root -"         # system-only (VM images / zvol parent)
  ];

  # NFS — primary protocol for all datasets
  services.rpcbind.enable = true;  # RPC portmapper, needed by NFSv3 clients

  services.nfs.server = {
    enable = true;
    # Pin the normally-dynamic sideband daemons to fixed ports so the
    # firewall can open a small range (20000-20005) instead of the
    # whole ephemeral range.
    statdPort = 20001;
    lockdPort = 20002;
    mountdPort = 20003;

    # fsid=0 makes /srv the NFSv4 pseudo-root; crossmnt lets clients
    # traverse into the child dataset mounts beneath it.
    # media is read-only (async is safe on a ro export); backup and vm
    # keep root privileges (no_root_squash) for system-level writers.
    exports = ''
      /srv          10.0.0.0/16(rw,sync,fsid=0,crossmnt,no_subtree_check,root_squash)
      /srv/share    10.0.0.0/16(rw,sync,no_subtree_check,root_squash)
      /srv/media    10.0.0.0/16(ro,async,no_subtree_check,root_squash)
      /srv/backup   10.0.0.0/16(rw,sync,no_subtree_check,no_root_squash)
      /srv/torrent  10.0.0.0/16(rw,sync,no_subtree_check,root_squash)
      /srv/vm       10.0.0.0/16(rw,sync,no_subtree_check,no_root_squash)
    '';
  };

  # NFSv4 id mapping: user/group names translate between client and
  # server only when both sides agree on this domain string.
  services.nfs.idmapd.settings = {
    General.Domain = "skydick.local";
    Mapping = {
      Nobody-User = "nobody";    # unmappable ids fall back to nobody/nogroup
      Nobody-Group = "nogroup";
    };
  };

  # Samba — user-facing datasets only (Windows/Mac convenience)
  services.samba = {
    enable = true;
    # SMB/NetBIOS ports are opened explicitly in networking.firewall below.
    openFirewall = false;

    settings = {
      global = {
        workgroup = "WORKGROUP";
        "server string" = "Skydick Storage";
        "netbios name" = "SKYDICK";
        security = "user";
        # Only the LAN (10.0.x.x) and loopback may connect.
        "hosts allow" = "10.0. 127.";
        "hosts deny" = "ALL";

        # NOTE(review): smb.conf(5) advises leaving "socket options"
        # unset on modern kernels — fixed buffer sizes can hurt more
        # than help. Benchmark before keeping these.
        "socket options" = "TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=131072 SO_SNDBUF=131072";
        "use sendfile" = "yes";
        "aio read size" = "16384";   # requests >= 16 KiB handled asynchronously
        "aio write size" = "16384";

        "map to guest" = "never";            # no guest access anywhere
        "server min protocol" = "SMB2_10";   # refuse SMB1 clients

        "load printers" = "no";
      };

      # Read/write share for the storage group; 0664/2775 masks keep
      # group-write + setgid consistent with the dataset permissions.
      share = {
        path = "/srv/share";
        browseable = "yes";
        "read only" = "no";
        "guest ok" = "no";
        "valid users" = "@storage";
        "create mask" = "0664";
        "directory mask" = "2775";
      };

      # Read-only media library.
      media = {
        path = "/srv/media";
        browseable = "yes";
        "read only" = "yes";
        "valid users" = "@storage";
      };

      # Same policy as `share`.
      torrent = {
        path = "/srv/torrent";
        browseable = "yes";
        "read only" = "no";
        "guest ok" = "no";
        "valid users" = "@storage";
        "create mask" = "0664";
        "directory mask" = "2775";
      };
    };
  };

  # WS-Discovery responder so Windows clients can see the server under
  # "Network". NOTE(review): with openFirewall = false, discovery needs
  # UDP 3702 and TCP 5357 reachable — verify the firewall allows them.
  services.samba-wsdd = {
    enable = true;
    openFirewall = false;
  };

  # iSCSI — vm zvols only
  # Enables the Linux LIO target stack; zvols under dick/vm are exported
  # as LUNs (configured out-of-band, e.g. via targetcli).
  services.target.enable = true;

  # Firewall: storage service ports
  networking.firewall = {
    allowedTCPPorts = [
      111   # RPC portmapper (NFS)
      2049  # NFS
      445   # SMB
      139   # NetBIOS session (legacy SMB clients)
      3260  # iSCSI
      5357  # WS-Discovery HTTP endpoint (samba-wsdd)
    ];
    allowedUDPPorts = [
      111   # RPC portmapper (NFS)
      2049  # NFS over UDP (NFSv3 clients only — NFSv4 is TCP-only)
      137   # NetBIOS Name Service
      138   # NetBIOS Datagram
      3702  # WS-Discovery multicast (samba-wsdd)
    ];
    # statd/lockd/mountd are pinned to 20001-20003 in services.nfs.server.
    allowedTCPPortRanges = [{ from = 20000; to = 20005; }];
    allowedUDPPortRanges = [{ from = 20000; to = 20005; }];
  };
}