switch to xfce

This commit is contained in:
2025-08-12 00:30:50 +02:00
parent 073c6a7a64
commit 625c970af0
3 changed files with 102 additions and 113 deletions

View File

@@ -16,13 +16,13 @@ help:
@echo " make local-vm-run - Start the complete workshop environment in a VM"
@echo ""
@echo "--- Cloud Infrastructure ---"
@echo " make cloud-deploy - Deploy VMs to Hetzner"
@echo " make cloud-status - Check the status of cloud servers"
@echo " make cloud-destroy - Destroy all cloud infrastructure"
@echo " make deploy-cloud - Deploy VMs to Hetzner"
@echo " make status-cloud - Check the status of cloud servers"
@echo " make destroy-cloud - Destroy all cloud infrastructure"
@echo ""
@echo "--- USB Drive Creation ---"
@echo " make usb-build - Build the NixOS ISO for the workshop"
@echo " make usb-flash - Flash the ISO to a USB drive (set USB_DEVICE)"
@echo " make build-usb - Build the NixOS ISO for the workshop"
@echo " make flash-usb - Flash the ISO to a USB drive (set USB_DEVICE)"
@echo ""
@echo "--- Host Development Shell ---"
@echo " make local-shell - Enter a dev shell with terraform, etc., on your main machine"
@@ -31,12 +31,20 @@ help:
# --- Local Development ---
local-vm-run:
@echo "🚀 Starting local workshop environment inside a VM..."
@echo " (Close the VM window or press Ctrl+A, X to exit)"
@echo " (A new window with a desktop will open. Close it to stop the VM.)"
nix run --impure .#local-vm
# --- Cloud Infrastructure ---
cloud-deploy:
deploy-cloud:
@echo "🚀 Deploying to Hetzner Cloud..."
cd terraform && terraform init
cd terraform && terraform apply -auto-approve \
-var="hcloud_token=${HCLOUD_TOKEN}" \
-var="domain=${DOMAIN}" \
-var="ssh_public_key=$$(cat ~/.ssh/id_rsa.pub)"
@echo "🔍 Running health checks..."
./scripts/deploy.sh
@echo "✅ Cloud deployment complete and verified!"
status-cloud:
@echo "📊 Checking server status..."
@@ -53,10 +61,12 @@ destroy-cloud:
cd terraform && terraform destroy -auto-approve
# --- USB Boot Drive ---
usb-build:
build-usb:
@echo "🔨 Building NixOS workshop ISO..."
nix build .#live-iso
@echo "✅ ISO built: result/iso/nixos.iso"
flash-usb: usb-build
flash-usb: build-usb
@echo "⚠️ Flashing to ${USB_DEVICE} - THIS WILL ERASE THE DEVICE!"
@read -p "Continue? [y/N]: " confirm && [ "$$confirm" = "y" ]
sudo dd if=result/iso/nixos.iso of=${USB_DEVICE} bs=4M status=progress oflag=sync

109
README.md
View File

@@ -1,89 +1,98 @@
# 🍪 CODE CRISPIES Workshop Infrastructure
Three deployment environments for Co-op Cloud workshop:
This repository contains the infrastructure for the Co-op Cloud workshop, providing three distinct deployment environments.
---
## 🚀 Quick Start
```bash
# 1. Build & flash USB drives
# 1. Start the local development virtual machine
make local-vm-run
# 2. Build & flash USB drives for participants
make build-usb
make flash-usb USB_DEVICE=/dev/sdX
# 2. Deploy cloud infrastructure
export HCLOUD_TOKEN=your_token
# 3. Deploy the production cloud infrastructure
export HCLOUD_TOKEN="your_token_here"
make deploy-cloud
```
# 3. Local development
make local-shell
make local-deploy
make local-ssh
```
-----
## 📁 Project Structure
```
├── flake.nix # USB boot environment
├── local/flake.nix # Local NixOS containers
├── flake.nix # All Nix configurations (USB, VM)
├── terraform/ # Hetzner Cloud infrastructure
├── scripts/deploy.sh # Cloud setup automation
├── docs/USB_BOOT_INSTRUCTIONS.md
└── Makefile # Build & deploy commands
```
-----
## 🌍 Three Environments
### 1. Cloud (Production)
- Hetzner VMs: `hopper.codecrispi.es`, `curie.codecrispi.es`, etc.
- Pre-configured with Docker Swarm + abra
- SSL certificates via Let's Encrypt
### 1. Cloud (Production)
### 2. USB Boot (Workshop)
- NixOS live environment
- Auto-connects to workshop WiFi
- Helper functions: `connect hopper`, `recipes`, `help`
- SSH into cloud VMs
- **What:** Hetzner VMs named `hopper.codecrispi.es`, `curie.codecrispi.es`, etc.
- **Purpose:** The live environment for workshop participants.
### 3. Local (Development)
- NixOS containers: `participant1.local` through `participant15.local`
- Test abra deployments locally
- Isolated Docker Swarm per container
### 2. USB Boot (Workshop)
## 🔧 Development Workflow
- **What:** A bootable NixOS live environment.
- **Purpose:** Used by participants to connect to their cloud servers. It includes helper functions like `connect hopper`.
```bash
# Enter development environment
make local-shell
### 3. Local (Development)
# Deploy local testing environment
make local-deploy
- **What:** A self-contained Virtual Machine (VM) that runs on your local computer.
- **Purpose:** The VM hosts simulated participant containers (e.g., `hopper.local`) and includes a lightweight desktop with a web browser, providing a perfect, isolated environment to test the entire workshop flow without needing cloud servers.
# SSH into local participant container
make local-ssh # Select participant 1-15
-----
# Test app deployment inside container
abra app new wordpress -S --domain=test.participant1.local
abra app deploy test.participant1.local
```
## 🔧 Local Development Workflow
## 📦 Workshop Flow
1. **Start the VM**
Run the following command. A new window will open and automatically boot into a lightweight desktop.
1. **Participant boots USB** → NixOS live environment
2. **Connects to WiFi** → `CODE_CRISPIES_GUEST`
3. **SSH to cloud VM** → `connect hopper`
4. **Deploy apps** → `abra app new wordpress -S --domain=mysite.hopper.codecrispi.es`
5. **Access via browser** → `https://mysite.hopper.codecrispi.es`
```bash
make local-vm-run
```
## 🎯 Available Apps
2. **Work Inside the VM**
All testing is now done inside the VM's graphical desktop.
- **WordPress** - CMS/Blog
- **Nextcloud** - File sharing
- **HedgeDoc** - Collaborative markdown
- **Jitsi** - Video conferencing
- **PrestaShop** - E-commerce
* Open the **Terminal** to run commands.
* Open **Firefox** to view the deployed web applications.
3. **Example: Deploying WordPress**
* **In the VM's Terminal**, get a root shell and SSH into the first participant's container:
```bash
# Become root (no password needed)
sudo -i
# Connect to participant 1 (hopper.local)
ssh root@192.168.100.11
```
* **Inside the container**, deploy a WordPress site with `abra`:
```bash
abra app new wordpress -S --domain=blog.hopper.local
abra app deploy blog.hopper.local
```
* **In the VM's Firefox**, navigate to the address `http://blog.hopper.local`. You will see the WordPress installation screen.
-----
## 🧹 Cleanup
```bash
make clean # Clean local artifacts
make destroy-cloud # Destroy Hetzner infrastructure
# Clean local build artifacts
make clean
# Destroy Hetzner cloud infrastructure
make destroy-cloud
# To stop the local VM, simply close its window.
```

View File

@@ -10,18 +10,16 @@
let
system = "x86_64-linux";
pkgs = nixpkgs.legacyPackages.${system};
participantNames = [
# You can change the range here for local development (e.g., 1 2)
participantRange = nixpkgs.lib.range 1 2;
fullParticipantNames = [
"hopper" "curie" "lovelace" "noether" "hamilton"
"franklin" "johnson" "clarke" "goldberg" "liskov"
"wing" "rosen" "shaw" "karp" "rich"
];
in
{
# --------------------------------------------------------------------------------
# 1. PACKAGES (USB ISO and Local VM)
# --------------------------------------------------------------------------------
packages.${system} = {
# `nix build .#live-iso`
live-iso = nixos-generators.nixosGenerate {
inherit system;
format = "iso";
@@ -29,23 +27,11 @@
({ pkgs, ... }: {
system.stateVersion = "25.05";
networking.networkmanager.enable = true;
systemd.services.workshop-wifi = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
script = ''
${pkgs.networkmanager}/bin/nmcli dev wifi connect "CODE_CRISPIES_GUEST" password "workshop2024" || true
'';
};
services.getty.autologinUser = "workshop";
users.users.workshop = { isNormalUser = true; shell = pkgs.zsh; };
programs.zsh = {
enable = true;
interactiveShellInit = ''
echo "🍪 CODE CRISPIES Workshop Environment"
connect() { ssh -o StrictHostKeyChecking=no workshop@$1.codecrispi.es; }
'';
};
environment.systemPackages = with pkgs; [ openssh curl git networkmanager ];
# Use generic autologin for the live ISO TTY
services.getty.autologinUser = "workshop";
programs.zsh.enable = true;
environment.systemPackages = with pkgs; [ openssh curl git ];
services.xserver = {
enable = true;
displayManager.lightdm.enable = true;
@@ -54,14 +40,9 @@
})
];
};
# `nix run .#local-vm`
local-vm = self.nixosConfigurations.workshop-local.config.system.build.vm;
};
# --------------------------------------------------------------------------------
# 2. STANDALONE LOCAL DEVELOPMENT VM CONFIGURATION
# --------------------------------------------------------------------------------
nixosConfigurations.workshop-local = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
@@ -70,31 +51,30 @@
boot.loader.grub.enable = false;
boot.loader.generic-extlinux-compatible.enable = true;
# Create a simple 'workshop' user with no password
users.users.workshop = {
isNormalUser = true;
extraGroups = [ "wheel" ]; # for sudo
password = "";
};
services.getty.autologinUser = "workshop";
# Enable a lightweight desktop environment guaranteed to work in a VM
# --- THIS IS THE CORRECTED SECTION ---
services.xserver = {
enable = true;
displayManager.lightdm.enable = true;
desktopManager.xfce.enable = true;
displayManager.lightdm.enable = true;
};
# Use the generic, documented options for graphical autologin
services.displayManager.autoLogin.enable = true;
services.displayManager.autoLogin.user = "workshop";
# Allow empty passwords for login and sudo
environment.systemPackages = with pkgs; [ gnumake nano nixos-container firefox ];
security.pam.services.login.allowNullPassword = true;
security.sudo.wheelNeedsPassword = false;
networking.hostName = "workshop-vm";
environment.systemPackages = with pkgs; [ gnumake nano nixos-container ];
# Define all participant containers
containers = builtins.listToAttrs (map (i:
let participant = builtins.elemAt participantNames (i - 1);
let participant = builtins.elemAt fullParticipantNames (i - 1);
in {
name = "participant${toString i}";
value = {
@@ -112,39 +92,29 @@
wantedBy = [ "multi-user.target" ];
after = [ "docker.service" ];
script = ''
# Docker Swarm and Abra setup
${pkgs.docker}/bin/docker swarm init --advertise-addr 192.168.100.${toString (10 + i)} || true
${pkgs.docker}/bin/docker network create -d overlay proxy || true
export HOME=/root
${pkgs.curl}/bin/curl -fsSL https://install.abra.coopcloud.tech | ${pkgs.bash}/bin/bash
mkdir -p /root/.abra/servers
/root/.local/bin/abra server add ${participant}.local
export HOME=/root;
${pkgs.docker}/bin/docker swarm init --advertise-addr 192.168.100.${toString (10 + i)} || true;
${pkgs.curl}/bin/curl -fsSL https://install.abra.coopcloud.tech | ${pkgs.bash}/bin/bash;
/root/.local/bin/abra server add ${participant}.local;
'';
serviceConfig = { Type = "oneshot"; RemainAfterExit = true; };
};
};
};
}
) (nixpkgs.lib.range 1 2));
) participantRange);
# DNS for containers
services.dnsmasq = {
enable = true;
settings.address = builtins.concatMap (i:
let participant = builtins.elemAt participantNames (i - 1);
in [
"/${participant}.local/192.168.100.${toString (10 + i)}"
"/.${participant}.local/192.168.100.${toString (10 + i)}"
]
) (nixpkgs.lib.range 1 2);
let participant = builtins.elemAt fullParticipantNames (i - 1);
in [ "/${participant}.local/192.168.100.${toString (10 + i)}" ]
) participantRange;
};
})
];
};
# --------------------------------------------------------------------------------
# 3. DEVELOPMENT SHELL (for your host machine)
# --------------------------------------------------------------------------------
devShells.${system}.default = pkgs.mkShell {
buildInputs = with pkgs; [
terraform