Skip to content
On this page

Docker

Docker Commands CLI

  • dockercli/docker cli
  • Docker Engine

My hychan48 Docker Images

bash
# old but working
docker pull jchan48h/deb-network
docker run -it --rm --name deb-network jchan48h/deb-network
exit
docker image ls

## todo:
docker pull jchan48h/devcontainer

Docker Quick Notes

  • Dockerfile
    • docker run means start and run it
      • normal entry point. can override cmd
    • docker exec means "fork"
      • new process inside the running container
    • docker attach
      • is like using the -it flag or Docker Desktop / VSCode
  • docker-compose.yml
    • docker-compose.override.yml
    • useful for mounting volumes for building
    • when creating services. start with tty etc.
    • naming convention (to add)

Dockerfile

Dockerfile
FROM debian:bookworm-slim
# https://docs.docker.com/reference/dockerfile/

# known dependencies: curl
# dev dependencies: zsh tree tldr git wget
RUN apt-get update && apt-get install -y \
  curl \
  zsh tree wget git \
  && rm -rf /var/lib/apt/lists/*
# rm -rf /var/lib/apt/lists/* removes the apt package index to keep the image small
# maybe add tmux etc. only

# assume mounted docker to /webapp using compose
SHELL ["/bin/bash", "-c"]

# VOLUME won't actually mount anything; it documents the expected mount point
VOLUME ["/webapp"]
WORKDIR /webapp
# COPY . /webapp
# using compose to mount the source instead of baking it into the image
# default port for jupyterlab
EXPOSE 8888
HEALTHCHECK --interval=5m --timeout=3s \
  CMD curl -f http://localhost:8888/ || exit 1
# entrypoint if mounting using compose has to be in compose
# ENTRYPOINT [ "./entrypoints/smoke-tests.sh" ]
# cmd is also overridden by compose since entrypoint was declared there
# exec form (JSON array) so zsh runs as PID 1 and receives signals directly,
# instead of being wrapped in "/bin/sh -c" by the shell form
CMD ["/bin/zsh"]

Docker w/o Compose

bash
# docker run starts a new container instance
docker run -it --rm --name jc --hostname jc alpine:latest /bin/sh
# --entrypoint, --env, etc. — there are a lot of options
# docker run -it --rm --name conda-jupyter-node --hostname cfn conda-jupyter-node:latest ./entrypoints/smoke-test.sh
yaml
# Docker Compose for mounting volumes during build
# NOTE(review): the top-level "version" key is obsolete in Compose v2 and ignored
version: "3.8"
name: conda-jupyter-node
# a name like conda-node-jupyter would read better when compose
# auto-appends the service name to container names
services:
  webapp:
    # comment out the build section below to skip building
    # NOTE(review): the auto-generated name was docker-webapp for some reason;
    # container_name pins it explicitly
    container_name: jupyter-webapp
    hostname: jupyter-webapp
    # for dev: maybe should add a dev env file
    stdin_open: true # docker run -i; keeps stdin open until a real service is running
    tty: true        # docker run -t; needed because zsh/bash is a shell, not a long-running service
    
    build: 
      context: .
      dockerfile: ./Dockerfile
    entrypoint: ./entrypoints/smoke-tests.sh # runs first, then execs the command (confirmed working)
    command: /bin/zsh # passed as "$@" to the entrypoint, since entrypoint is defined
    # image: jchan48h/conda-jupyter-node
    volumes:
      - .:/webapp

Docker Compose

bash
# Docker
# assuming pwd is conda-jupyter-node/
# docker build -t jchan48h/conda-jupyter-node ./docker
docker image prune -f
docker container prune -f
exit
docker compose -f ./docker/docker-compose.yml rm --force --volumes
docker compose -f ./docker/docker-compose.yml down --remove-orphans --volumes
docker compose -f ./docker/docker-compose.yml down
docker compose -f ./docker/docker-compose.yml up --build
docker compose -f ./docker/docker-compose.yml up --build -d
docker compose -f ./docker/docker-compose.yml up -d # detached
docker compose -f ./docker/docker-compose.yml start
docker compose -f ./docker/docker-compose.yml attach webapp # will kill if exit
docker compose -f ./docker/docker-compose.yml exec webapp /bin/zsh

Docker EntryPoint

bash
#!/usr/bin/env bash
# Smoke-test entrypoint: print basic environment info, then hand off to the
# container's CMD / compose `command` so it runs as PID 1.
# -e is to exit on error, -u is to exit on unset variables, -x is to print each command
# -o pipefail is to exit on error in a pipe
set -euxo pipefail
# show the OS we are running on (was `echo`, which only printed the literal path)
cat /etc/os-release
printenv | grep -i home
# doesn't need to be bash even
# the compose healthcheck also exercises the container
# important: replace this shell with the requested command
exec "$@"
bash
#!/bin/sh
# Entrypoint (from the official node image): if the first argument looks like a
# flag, is not a command on PATH, or is an existing file that is not executable,
# assume it is meant for node and prepend "node" to the argument list.
set -e

# Run command with node if the first argument contains a "-" or is not a system command. The last
# part inside the "{}" is a workaround for the following bug in ash/dash:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=874264
if [ "${1#-}" != "${1}" ] || [ -z "$(command -v "${1}")" ] || { [ -f "${1}" ] && ! [ -x "${1}" ]; }; then
  set -- node "$@"
fi

# replace the shell with the requested command so it runs as PID 1
exec "$@"

SSHD Tester

bash
chmod 400 ./id_ed25519
chmod 444 ./id_*.pub # make all readable
# inject self key
bash
# One time
cd deb-sshd-workspace
ssh-keygen -t ed25519 -q -N "" -C "shared-docker" -f ./id_ed25519


# together
cd deb-sshd-workspace
# todo add this somewhere and use docker secrets

docker build -t deb-sshd-workspace:latest -f ./Dockerfile .
docker compose up -d
docker compose up --force-recreate -d
docker compose down

# Exec bash
docker exec -it deb-sshd-workspace-deb1-1 bash
docker exec -it deb-sshd-workspace-deb2-1 bash
docker exec -it deb-sshd-workspace-host-1 bash # host root@localhost:2022
# run it on both - can add a smarter script later
smoke-test.sh
exit

# try host to host - ssh is from cygwin.. that's why
ssh -i ./id_ed25519 -o BatchMode=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2022 root@localhost hostname
## debug host:
docker exec -it deb-sshd-workspace-host-1 bash
bash
# ssh_config
IdentityFile ~/.ssh/id_ed25519
StrictHostKeyChecking no
UserKnownHostsFile /dev/null

debugging

bash
# mostly for non-interactive
printenv
# doesn't add to known_hosts
bash -c "set -euxo pipefail && ssh -o BatchMode=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null localhost hostname"
# adds to known_hosts
bash -c "set -euxo pipefail && ssh -o BatchMode=yes -o StrictHostKeyChecking=no localhost hostname"
# Check
cd ~/.ssh/ && ls -la
cat ~/.ssh/known_hosts

rm ~/.ssh/known_hosts*
# batchmode seems to be the best
ssh deb1 hostname < /dev/null

Common Commands

bash
docker pull jchan48h/deb-network
docker run -it --rm --name deb-network jchan48h/deb-network
echo hi; exit
bash
# node versions...
## https://hub.docker.com/_/node/
docker pull node:bookworm-slim
exit
docker run -it --rm --name node-slim --hostname node-slim node:bookworm-slim # goes to node
docker run -it --rm --name node-slim --hostname node-slim node:bookworm-slim bash
# yarn is installed but not pnpm...


# pnpm install
## https://pnpm.io/installation
wget -qO- https://get.pnpm.io/install.sh | ENV="$HOME/.bashrc" SHELL="$(which bash)" bash -

Common Images

bash
docker pull debian:bookworm # debian 12
docker pull debian:bookworm-slim
docker pull buildpack-deps:bookworm # 12
  • set hostname / name
  • etc.

Volume

bash
cd /var/lib/docker/volumes/
# a named volume's files live under <volumeName>/_data (was "_ata", a typo)
ls /var/lib/docker/volumes/volumeName/_data
powershell
# docker desktop can see the Mount point on target
\\wsl.localhost\docker-desktop-data\version-pack-data\community\docker\volumes
cd \\wsl.localhost\docker-desktop-data\version-pack-data\community\docker\volumes
# cd C:\ProgramData\Docker\volumes

wsl -d docker-desktop # root is default
# wsl -d docker-desktop-data # didn't work
cd /mnt/host/wsl/docker-desktop-data/version-pack-data/community/docker/volumes

# with uid 1? or 0 — TODO confirm which user owns the volume files
# docker exec -it mycontainer sh

Compose

bash
docker-compose -f docker-compose-pg-adminer.yaml up
docker-compose -f docker-compose-pg-adminer.yaml rm
docker-compose -f docker-compose-pg-adminer.yaml rm --force -v

# high level?
# container_name: my-web-container
# label

Docker Desktop - GUI

  • there are webguis and http etc.

Apps

Nginx

Postgres / PGAdmin

todo

  • registry
  • more nginx / split
  • ssl network

Windows - Docker Desktop

  • reinstall seems to work
    • careful
    • docker desktop might crash in the background w/o warning
powershell
docker context create remote --docker "host=ssh://your-remote-user@remote-linux-host"
docker context use remote # or have a better name than remote
# switch back
docker context use default
# list
docker context ls

Dockerfile

  • hychan48/devcontainer:latest
  • jchan48h/devcontainer:latest
  • jchan48h/deb-network:latest
bash
# in other directory
docker build -t docker-test:dev -f ./dockers/docker-test/Dockerfile .

# Build Only
docker build -t docker-test:dev -f ./dockers/docker-test/Dockerfile .
(F=./dockers/docker-test/;N="docker-test"; docker build -t $N:dev -f $F/Dockerfile . && docker run -it --name $N --hostname $N --rm $N:dev bash)

# Build and run it - bash - volume mounted
(F=./dockers/docker-test/;N="docker-test"; docker build -t $N:dev -f $F/Dockerfile . && docker run -it --name $N -v "$(pwd):/app" -w "/app" --hostname $N --rm $N:dev bash)

# Build and run zsh + ssh volume
(F=./dockers/docker-test/;N="docker-test"; docker build -t $N:dev -f $F/Dockerfile . && docker run -it --name $N -v "$HOME/.ssh/:/root/.ssh" -v "$(pwd):/app" -w "/app" --hostname $N --rm $N:dev zsh)

Ubuntu Display

  • move me
bash
# ubuntu desktop wayland
export DISPLAY=:0
# need to be on the right user first...
xhost si:localuser:root
# xhost -si:localuser:root

gsettings set org.gnome.desktop.screensaver lock-enabled false
gsettings set org.gnome.desktop.screensaver idle-activation-enabled false

OVMF.fd

python
# Boot Commands when Loading the ISO file with OVMF.fd file (Tianocore) / GrubV2
  boot_command = [
    "<spacebar><wait><spacebar><wait><spacebar><wait><spacebar><wait><spacebar><wait>",
    "e<wait>",
    "<down><down><down><end>",
    " autoinstall ds=nocloud-net\\;s=http://{{ .HTTPIP }}:{{ .HTTPPort }}/",
    "<f10>"
  ]

yaml yq util

Cloud-init yaml

bash
sudo snap install yq
yq --version

ls |grep "user-data.template"
# e evaluate
# ea evaluate-all
# -i edits inline...

yq e '.some.path.to.value = "new value"' -i "user-data.template"
yq e '.some.path.to.value = "new value"' "user-data.template" # actually creates the nest
yq e '.apt.proxy = "http://10.153.219.87:3142"' "user-data.template" > http/user-data
yq e '.apt.proxy = "http://10.153.219.1:3142"' "user-data.template" > http/user-data


# show inet4 only
# inet 10.153.219.1/24 scope global lxdbr0 => 10.153.219
ip addr show lxdbr0 |grep -i "inet " | awk '{print $2}' | cut -d'/' -f1
# 10.153.219

ip addr show lxdbr0  # shows ip
ip link show lxdbr0 # doesnt show ip

yq ea 'select(fileIndex == 0) * select(fileIndex == 1)' file1.yaml file2.yaml > merged.yaml
yq ea 'select(fileIndex == 0) * select(fileIndex == 1)' file1.yaml file2.yaml

HCL Editor

cloud-init restart

bash
# haven't tested, but if that's the case, recreating the VM might be faster than reloading
# lxd can reboot, but need ssh to access
# Remove the cloud-init state files
sudo rm -rf /var/lib/cloud/*

# After removing the state files, you can either reboot or rerun cloud-init
sudo cloud-init init
sudo cloud-init modules --mode config
sudo cloud-init modules --mode final