switch to xcfe

This commit is contained in:
2025-08-12 00:30:50 +02:00
parent 073c6a7a64
commit 625c970af0
3 changed files with 102 additions and 113 deletions

View File

@@ -16,13 +16,13 @@ help:
@echo " make local-vm-run - Start the complete workshop environment in a VM" @echo " make local-vm-run - Start the complete workshop environment in a VM"
@echo "" @echo ""
@echo "--- Cloud Infrastructure ---" @echo "--- Cloud Infrastructure ---"
@echo " make cloud-deploy - Deploy VMs to Hetzner" @echo " make deploy-cloud - Deploy VMs to Hetzner"
@echo " make cloud-status - Check the status of cloud servers" @echo " make status-cloud - Check the status of cloud servers"
@echo " make cloud-destroy - Destroy all cloud infrastructure" @echo " make destroy-cloud - Destroy all cloud infrastructure"
@echo "" @echo ""
@echo "--- USB Drive Creation ---" @echo "--- USB Drive Creation ---"
@echo " make usb-build - Build the NixOS ISO for the workshop" @echo " make build-usb - Build the NixOS ISO for the workshop"
@echo " make usb-flash - Flash the ISO to a USB drive (set USB_DEVICE)" @echo " make flash-usb - Flash the ISO to a USB drive (set USB_DEVICE)"
@echo "" @echo ""
@echo "--- Host Development Shell ---" @echo "--- Host Development Shell ---"
@echo " make local-shell - Enter a dev shell with terraform, etc., on your main machine" @echo " make local-shell - Enter a dev shell with terraform, etc., on your main machine"
@@ -31,12 +31,20 @@ help:
# --- Local Development --- # --- Local Development ---
local-vm-run: local-vm-run:
@echo "🚀 Starting local workshop environment inside a VM..." @echo "🚀 Starting local workshop environment inside a VM..."
@echo " (Close the VM window or press Ctrl+A, X to exit)" @echo " (A new window with a desktop will open. Close it to stop the VM.)"
nix run --impure .#local-vm nix run --impure .#local-vm
# --- Cloud Infrastructure --- # --- Cloud Infrastructure ---
cloud-deploy: deploy-cloud:
@echo "🚀 Deploying to Hetzner Cloud..."
cd terraform && terraform init
cd terraform && terraform apply -auto-approve \
-var="hcloud_token=${HCLOUD_TOKEN}" \
-var="domain=${DOMAIN}" \
-var="ssh_public_key=$$(cat ~/.ssh/id_rsa.pub)"
@echo "🔍 Running health checks..."
./scripts/deploy.sh ./scripts/deploy.sh
@echo "✅ Cloud deployment complete and verified!"
status-cloud: status-cloud:
@echo "📊 Checking server status..." @echo "📊 Checking server status..."
@@ -53,10 +61,12 @@ destroy-cloud:
cd terraform && terraform destroy -auto-approve cd terraform && terraform destroy -auto-approve
# --- USB Boot Drive --- # --- USB Boot Drive ---
usb-build: build-usb:
@echo "🔨 Building NixOS workshop ISO..."
nix build .#live-iso nix build .#live-iso
@echo "✅ ISO built: result/iso/nixos.iso"
flash-usb: usb-build flash-usb: build-usb
@echo "⚠️ Flashing to ${USB_DEVICE} - THIS WILL ERASE THE DEVICE!" @echo "⚠️ Flashing to ${USB_DEVICE} - THIS WILL ERASE THE DEVICE!"
@read -p "Continue? [y/N]: " confirm && [ "$$confirm" = "y" ] @read -p "Continue? [y/N]: " confirm && [ "$$confirm" = "y" ]
sudo dd if=result/iso/nixos.iso of=${USB_DEVICE} bs=4M status=progress oflag=sync sudo dd if=result/iso/nixos.iso of=${USB_DEVICE} bs=4M status=progress oflag=sync

109
README.md
View File

@@ -1,89 +1,98 @@
# 🍪 CODE CRISPIES Workshop Infrastructure # 🍪 CODE CRISPIES Workshop Infrastructure
Three deployment environments for Co-op Cloud workshop: This repository contains the infrastructure for the Co-op Cloud workshop, providing three distinct deployment environments.
---
## 🚀 Quick Start ## 🚀 Quick Start
```bash ```bash
# 1. Build & flash USB drives # 1. Start the local development virtual machine
make local-vm-run
# 2. Build & flash USB drives for participants
make build-usb make build-usb
make flash-usb USB_DEVICE=/dev/sdX make flash-usb USB_DEVICE=/dev/sdX
# 2. Deploy cloud infrastructure # 3. Deploy the production cloud infrastructure
export HCLOUD_TOKEN=your_token export HCLOUD_TOKEN="your_token_here"
make deploy-cloud make deploy-cloud
```
# 3. Local development -----
make local-shell
make local-deploy
make local-ssh
```
## 📁 Project Structure ## 📁 Project Structure
``` ```
├── flake.nix # USB boot environment ├── flake.nix # All Nix configurations (USB, VM)
├── local/flake.nix # Local NixOS containers
├── terraform/ # Hetzner Cloud infrastructure ├── terraform/ # Hetzner Cloud infrastructure
├── scripts/deploy.sh # Cloud setup automation ├── scripts/deploy.sh # Cloud setup automation
├── docs/USB_BOOT_INSTRUCTIONS.md ├── docs/USB_BOOT_INSTRUCTIONS.md
└── Makefile # Build & deploy commands └── Makefile # Build & deploy commands
``` ```
-----
## 🌍 Three Environments ## 🌍 Three Environments
### 1. Cloud (Production) ### 1\. Cloud (Production)
- Hetzner VMs: `hopper.codecrispi.es`, `curie.codecrispi.es`, etc.
- Pre-configured with Docker Swarm + abra
- SSL certificates via Let's Encrypt
### 2. USB Boot (Workshop) - **What:** Hetzner VMs named `hopper.codecrispi.es`, `curie.codecrispi.es`, etc.
- NixOS live environment - **Purpose:** The live environment for workshop participants.
- Auto-connects to workshop WiFi
- Helper functions: `connect hopper`, `recipes`, `help`
- SSH into cloud VMs
### 3. Local (Development) ### 2\. USB Boot (Workshop)
- NixOS containers: `participant1.local` through `participant15.local`
- Test abra deployments locally
- Isolated Docker Swarm per container
## 🔧 Development Workflow - **What:** A bootable NixOS live environment.
- **Purpose:** Used by participants to connect to their cloud servers. It includes helper functions like `connect hopper`.
```bash ### 3\. Local (Development)
# Enter development environment
make local-shell
# Deploy local testing environment - **What:** A self-contained Virtual Machine (VM) that runs on your local computer.
make local-deploy - **Purpose:** The VM hosts simulated participant containers (e.g., `hopper.local`) and includes a lightweight desktop with a web browser, providing a perfect, isolated environment to test the entire workshop flow without needing cloud servers.
# SSH into local participant container -----
make local-ssh # Select participant 1-15
# Test app deployment inside container ## 🔧 Local Development Workflow
abra app new wordpress -S --domain=test.participant1.local
abra app deploy test.participant1.local
```
## 📦 Workshop Flow 1. **Start the VM**
Run the following command. A new window will open and automatically boot into a lightweight desktop.
1. **Participant boots USB** → NixOS live environment ```bash
2. **Connects to WiFi** → `CODE_CRISPIES_GUEST`
3. **SSH to cloud VM** → `connect hopper`
4. **Deploy apps** → `abra app new wordpress -S --domain=mysite.hopper.codecrispi.es`
5. **Access via browser** → `https://mysite.hopper.codecrispi.es`
## 🎯 Available Apps 2. **Work Inside the VM**
All testing is now done inside the VM's graphical desktop.
- **WordPress** - CMS/Blog * Open the **Terminal** to run commands.
- **Nextcloud** - File sharing * Open **Firefox** to view the deployed web applications.
- **HedgeDoc** - Collaborative markdown
- **Jitsi** - Video conferencing 3. **Example: Deploying WordPress**
- **PrestaShop** - E-commerce
* **In the VM's Terminal**, get a root shell and SSH into the first participant's container:
```bash
# Become root (no password needed)
sudo -i
# Connect to participant 1 (hopper.local)
ssh root@192.168.100.11
```
* **Inside the container**, deploy a WordPress site with `abra`:
```bash
abra app new wordpress -S --domain=blog.hopper.local
abra app deploy blog.hopper.local
```
* **In the VM's Firefox**, navigate to the address `http://blog.hopper.local`. You will see the WordPress installation screen.
-----
## 🧹 Cleanup ## 🧹 Cleanup
```bash ```bash
make clean # Clean local artifacts # Clean local build artifacts
make destroy-cloud # Destroy Hetzner infrastructure make clean
# Destroy Hetzner cloud infrastructure
make destroy-cloud
# To stop the local VM, simply close its window.
``` ```

View File

@@ -10,18 +10,16 @@
let let
system = "x86_64-linux"; system = "x86_64-linux";
pkgs = nixpkgs.legacyPackages.${system}; pkgs = nixpkgs.legacyPackages.${system};
participantNames = [ # You can change the range here for local development (e.g., 1 2)
participantRange = nixpkgs.lib.range 1 2;
fullParticipantNames = [
"hopper" "curie" "lovelace" "noether" "hamilton" "hopper" "curie" "lovelace" "noether" "hamilton"
"franklin" "johnson" "clarke" "goldberg" "liskov" "franklin" "johnson" "clarke" "goldberg" "liskov"
"wing" "rosen" "shaw" "karp" "rich" "wing" "rosen" "shaw" "karp" "rich"
]; ];
in in
{ {
# --------------------------------------------------------------------------------
# 1. PACKAGES (USB ISO and Local VM)
# --------------------------------------------------------------------------------
packages.${system} = { packages.${system} = {
# `nix build .#live-iso`
live-iso = nixos-generators.nixosGenerate { live-iso = nixos-generators.nixosGenerate {
inherit system; inherit system;
format = "iso"; format = "iso";
@@ -29,23 +27,11 @@
({ pkgs, ... }: { ({ pkgs, ... }: {
system.stateVersion = "25.05"; system.stateVersion = "25.05";
networking.networkmanager.enable = true; networking.networkmanager.enable = true;
systemd.services.workshop-wifi = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
script = ''
${pkgs.networkmanager}/bin/nmcli dev wifi connect "CODE_CRISPIES_GUEST" password "workshop2024" || true
'';
};
services.getty.autologinUser = "workshop";
users.users.workshop = { isNormalUser = true; shell = pkgs.zsh; }; users.users.workshop = { isNormalUser = true; shell = pkgs.zsh; };
programs.zsh = { # Use generic autologin for the live ISO TTY
enable = true; services.getty.autologinUser = "workshop";
interactiveShellInit = '' programs.zsh.enable = true;
echo "🍪 CODE CRISPIES Workshop Environment" environment.systemPackages = with pkgs; [ openssh curl git ];
connect() { ssh -o StrictHostKeyChecking=no workshop@$1.codecrispi.es; }
'';
};
environment.systemPackages = with pkgs; [ openssh curl git networkmanager ];
services.xserver = { services.xserver = {
enable = true; enable = true;
displayManager.lightdm.enable = true; displayManager.lightdm.enable = true;
@@ -54,14 +40,9 @@
}) })
]; ];
}; };
# `nix run .#local-vm`
local-vm = self.nixosConfigurations.workshop-local.config.system.build.vm; local-vm = self.nixosConfigurations.workshop-local.config.system.build.vm;
}; };
# --------------------------------------------------------------------------------
# 2. STANDALONE LOCAL DEVELOPMENT VM CONFIGURATION
# --------------------------------------------------------------------------------
nixosConfigurations.workshop-local = nixpkgs.lib.nixosSystem { nixosConfigurations.workshop-local = nixpkgs.lib.nixosSystem {
system = "x86_64-linux"; system = "x86_64-linux";
modules = [ modules = [
@@ -70,31 +51,30 @@
boot.loader.grub.enable = false; boot.loader.grub.enable = false;
boot.loader.generic-extlinux-compatible.enable = true; boot.loader.generic-extlinux-compatible.enable = true;
# Create a simple 'workshop' user with no password
users.users.workshop = { users.users.workshop = {
isNormalUser = true; isNormalUser = true;
extraGroups = [ "wheel" ]; # for sudo extraGroups = [ "wheel" ]; # for sudo
password = ""; password = "";
}; };
services.getty.autologinUser = "workshop";
# Enable a lightweight desktop environment guaranteed to work in a VM # --- THIS IS THE CORRECTED SECTION ---
services.xserver = { services.xserver = {
enable = true; enable = true;
displayManager.lightdm.enable = true;
desktopManager.xfce.enable = true; desktopManager.xfce.enable = true;
displayManager.lightdm.enable = true;
}; };
# Use the generic, documented options for graphical autologin
services.displayManager.autoLogin.enable = true;
services.displayManager.autoLogin.user = "workshop";
# Allow empty passwords for login and sudo
environment.systemPackages = with pkgs; [ gnumake nano nixos-container firefox ];
security.pam.services.login.allowNullPassword = true; security.pam.services.login.allowNullPassword = true;
security.sudo.wheelNeedsPassword = false; security.sudo.wheelNeedsPassword = false;
networking.hostName = "workshop-vm"; networking.hostName = "workshop-vm";
environment.systemPackages = with pkgs; [ gnumake nano nixos-container ];
# Define all participant containers
containers = builtins.listToAttrs (map (i: containers = builtins.listToAttrs (map (i:
let participant = builtins.elemAt participantNames (i - 1); let participant = builtins.elemAt fullParticipantNames (i - 1);
in { in {
name = "participant${toString i}"; name = "participant${toString i}";
value = { value = {
@@ -112,39 +92,29 @@
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "docker.service" ]; after = [ "docker.service" ];
script = '' script = ''
# Docker Swarm and Abra setup export HOME=/root;
${pkgs.docker}/bin/docker swarm init --advertise-addr 192.168.100.${toString (10 + i)} || true ${pkgs.docker}/bin/docker swarm init --advertise-addr 192.168.100.${toString (10 + i)} || true;
${pkgs.docker}/bin/docker network create -d overlay proxy || true ${pkgs.curl}/bin/curl -fsSL https://install.abra.coopcloud.tech | ${pkgs.bash}/bin/bash;
export HOME=/root /root/.local/bin/abra server add ${participant}.local;
${pkgs.curl}/bin/curl -fsSL https://install.abra.coopcloud.tech | ${pkgs.bash}/bin/bash
mkdir -p /root/.abra/servers
/root/.local/bin/abra server add ${participant}.local
''; '';
serviceConfig = { Type = "oneshot"; RemainAfterExit = true; }; serviceConfig = { Type = "oneshot"; RemainAfterExit = true; };
}; };
}; };
}; };
} }
) (nixpkgs.lib.range 1 2)); ) participantRange);
# DNS for containers
services.dnsmasq = { services.dnsmasq = {
enable = true; enable = true;
settings.address = builtins.concatMap (i: settings.address = builtins.concatMap (i:
let participant = builtins.elemAt participantNames (i - 1); let participant = builtins.elemAt fullParticipantNames (i - 1);
in [ in [ "/${participant}.local/192.168.100.${toString (10 + i)}" ]
"/${participant}.local/192.168.100.${toString (10 + i)}" ) participantRange;
"/.${participant}.local/192.168.100.${toString (10 + i)}"
]
) (nixpkgs.lib.range 1 2);
}; };
}) })
]; ];
}; };
# --------------------------------------------------------------------------------
# 3. DEVELOPMENT SHELL (for your host machine)
# --------------------------------------------------------------------------------
devShells.${system}.default = pkgs.mkShell { devShells.${system}.default = pkgs.mkShell {
buildInputs = with pkgs; [ buildInputs = with pkgs; [
terraform terraform