From aa0de8a4e100d93b63920d9372e7dca130f37a39 Mon Sep 17 00:00:00 2001 From: Morgan Patterson <31448722+vmorganp@users.noreply.github.com> Date: Wed, 1 Feb 2023 01:33:57 -0700 Subject: [PATCH] ReWork groups (#16) * remove packet count testing subproject * move examples * delete docker ignore after file reorganization * Notes for later me * Update minecraft example * README updates * More readme updates * change groups to group * code comments * remove notes to self * remove ... * fix zerotier example * fix bad copypasta * Readme updates * fix indentation * Create FUNDING.yml --------- Co-authored-by: Morgan --- .dockerignore | 6 - .github/FUNDING.yml | 1 + Dockerfile | 14 +- README.md | 129 +++++---- docker-compose.yaml | 33 ++- .../minecraft}/README.md | 2 +- examples/minecraft/docker-compose.yaml | 30 +++ .../zerotier}/README.md | 0 examples/zerotier/docker-compose.yml | 43 +++ lazytainer.go | 253 ------------------ minecraft_example/docker-compose.yaml | 32 --- go.mod => src/go.mod | 1 + go.sum => src/go.sum | 5 + src/group.go | 184 +++++++++++++ src/lazytainer.go | 159 +++++++++++ zerotier_example/docker-compose.yml | 38 --- 16 files changed, 527 insertions(+), 403 deletions(-) delete mode 100644 .dockerignore create mode 100644 .github/FUNDING.yml rename {minecraft_example => examples/minecraft}/README.md (92%) create mode 100644 examples/minecraft/docker-compose.yaml rename {zerotier_example => examples/zerotier}/README.md (100%) create mode 100644 examples/zerotier/docker-compose.yml delete mode 100644 lazytainer.go delete mode 100644 minecraft_example/docker-compose.yaml rename go.mod => src/go.mod (96%) rename go.sum => src/go.sum (94%) create mode 100644 src/group.go create mode 100644 src/lazytainer.go delete mode 100644 zerotier_example/docker-compose.yml diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index ddefd13..0000000 --- a/.dockerignore +++ /dev/null @@ -1,6 +0,0 @@ -# Ignore everything -* - -# But these files... -!**/go.** -!**.go diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000..a47c36f --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +custom: "https://www.buymeacoffee.com/vmorganp" diff --git a/Dockerfile b/Dockerfile index 071ec62..05d38cc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,12 +1,14 @@ # syntax = docker/dockerfile:latest -FROM golang as build +FROM golang:1.18.9-alpine3.17 as build +RUN apk add --update build-base gcc wget git libpcap-dev WORKDIR /app -COPY . . +COPY src/* /app/ RUN --mount=type=cache,target=/go/pkg/mod \ --mount=type=cache,target=/root/.cache/go-build \ go mod tidy; \ - CGO_ENABLED=0 go build -trimpath -ldflags="-s -w" -o lazytainer ./... + CGO_ENABLED=1 go build -trimpath -ldflags="-s -w" -o lazytainer ./... -FROM scratch -COPY --from=build /app/lazytainer /usr/local/bin/lazytainer -ENTRYPOINT ["/usr/local/bin/lazytainer"] +FROM alpine +RUN apk add --update libpcap-dev +COPY --from=build /app/lazytainer /app/lazytainer +ENTRYPOINT [ "./app/lazytainer" ] \ No newline at end of file diff --git a/README.md b/README.md index 5a4971a..075bcfd 100644 --- a/README.md +++ b/README.md @@ -4,64 +4,87 @@ Putting your containers to sleep --- ## Quick Explanation -Monitors network traffic to containers. If there is traffic, the container runs, otherwise the container is stopped/paused. for more details check out [How it works](#how-it-works). +Monitors network traffic to containers. If there is traffic, the container runs, otherwise the container is stopped/paused. 
for more details check out the [Configuration](##Configuration) section. ## Want to test it? -``` -$ git clone https://github.com/vmorganp/Lazytainer -$ cd Lazytainer -$ docker-compose up -``` - -## Or put in your docker-compose.yaml -``` - lazytainer: - container_name: lazytainer - image: ghcr.io/vmorganp/lazytainer:master - environment: - - PORT=81 # comma separated list of ports...or just the one - - LABEL=lazytainer # value of lazytainer.marker for other containers that lazytainer checks - # - TIMEOUT=30 # OPTIONAL number of seconds to let container idle - # - MINPACKETTHRESH=10 # OPTIONAL number of packets that must be recieved to keepalive/start container - # - POLLRATE=1 # OPTIONAL number of seconds to sleep between polls - # - VERBOSE=true # probably set this to false unless you're debugging or doing the initial demo - # - INTERFACE=eth0 # OPTIONAL interface to listen on - use eth0 in most cases - ports: - - 81:81 - volumes: - - /var/run/docker.sock:/var/run/docker.sock:ro - - whoami1: - container_name: whoami1 - image: containous/whoami - # configuring service ports is container specific. Look up how to do this on your service of choice - command: --port 81 # make this run on the port passed through on lazytainer - network_mode: service:lazytainer - depends_on: - - lazytainer # wait for lazytainer to start before starting - labels: - - "lazytainer.marker=lazytainer" # required label to make it work - - "lazytainer.sleepMethod=stop" # can be either "stop" or "pause", or left blank for stop -``` +1. Clone the project + ``` + git clone https://github.com/vmorganp/Lazytainer + cd Lazytainer + ``` +2. Start the stack + ```sh + docker-compose up + ``` + This will create 2 containers that you can reach through a third "lazytainer" container +3. View the running container by navigating to its web ui at `http://localhost:81`. You should see some information about the container +4. Close the tab and wait until the logs say "stopped container" +6. Navigate again to `http://localhost:81`, it should be a dead page +7. Navigate to `http://localhost:81` several times, enough to generate some network traffic, and it should start +8. To clean up, run + ```sh + docker-compose down + ``` ## Configuration -### Notes -- Lazytainer does not "automatically" start and stop all of your containers. You must apply a label to them and proxy their traffic through the Lazytainer container. +### Note: +Lazytainer does not "automatically" start and stop all of your containers. You must apply a label to them and proxy their traffic through the Lazytainer container. -### Environment Variables -| Variable | Purpose | -| --------------- | ---------------------------------------------------------------------------------------------------------- | -| PORT | Port number(s) to listen for traffic on. 
If specifying multiple, they should be comma separated | -| LABEL | Value for label `lazytainer.marker` that lazytainer should use to determine which containers to start/stop | -| TIMEOUT | Number of seconds container will be allowed to run with no traffic before it is stopped | -| MINPACKETTHRESH | Minimum amount of received network packets to keep container alive | -| POLLRATE | Number of seconds to wait between polls of network transmission stats | -| VERBOSE | Whether or not to print noisier logs that may be useful for debugging | -| INTERFACE | What interface to check for received packets on | +### Examples +For examples of lazytainer in action, check out the [Examples](./examples/) -## How it works -Lazytainer sits between users and the containers they are accessing, and acts as a network proxy. +### Groups +Lazytainer starts and stops other containers in "groups" of one or more other containers. To assign a container to a lazytainer group, a label must be added. The label will look like this. -Lazytainer checks to see if $MINPACKETTHRESH number of packets have been received in $TIMEOUT number of seconds. If the number of packets is above $MINPACKETTHRESH the container(s) with the label will ALL start/remain on depending on prior state. If the number of packets is less than $MINPACKETTHRESH, the container(s) with the label wil ALL stop/pause or remain stopped/pause depending on prior state and configuration. +```yaml +yourContainerThatWillSleep: + # ... configuration omitted for brevity + labels: + - "lazytainer.group=" +``` -If you use a reverse proxy like Caddy, NGINX, Traefik, or others, you can still point your reverse proxy of choice to your service. Instead of pointing directly at your service, you must instead point your reverse proxy to lazytainer, which will then pass your traffic to your service container. +To configure a group, add labels to the lazytainer container like this. Note that each is required to have a port(s) specified. These ports must also be forwarded on the lazytainer container +```yaml + lazytainer: + # ... configuration omitted for brevity + ports: + - 81:81 # used by group1 and group2 + - 82:82 # used by group2 + labels: + # Configuration items are formatted like this + - "lazytainer.group..=value" + # configuration for group 1 + - "lazytainer.group.group1.ports=81" + # configuration for group 2 + - "lazytainer.group.group2.ports=81,82" +``` + +Group properties that can be changed include: + +| Name | description | required | default | +| ------------------ | -------------------------------------------------------------------------------------- | -------- | ------- | +| ports | Network ports associated with a group, can be comma separated | Yes | n/a | +| inactiveTimeout | Time (seconds) before container is stopped when there is insufficient network activity | No | 30 | +| minPacketThreshold | Minimum count of network packets for container to be on | No | 30 | +| pollRate | How frequently (seconds) to check network activity | No | 30 | +| sleepMethod | How to put the container to sleep. Can be `stop` or `pause` | No | `stop` | +| netInterface | Network interface to listen on | No | `eth0` | + +### Additional Configuration +#### Verbose Logging +If you would like more verbose logging, you can apply the environment variable `VERBOSE=true` to lazytainer like so +```yaml + lazytainer: + # ... 
configuration omitted for brevity + environment: + - VERBOSE=true +``` + +#### Volumes +If using lazytainer, you MUST provide the following volume to lazytainer +```yaml + lazytainer: + # ... configuration omitted for brevity + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro +``` diff --git a/docker-compose.yaml b/docker-compose.yaml index 6501b1c..712491b 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -2,39 +2,44 @@ version: "3" services: lazytainer: container_name: lazytainer - image: ghcr.io/vmorganp/lazytainer:master + # image: ghcr.io/vmorganp/lazytainer:master + build: . environment: - - PORT=81,82 # comma separated list of ports...or just the one - - LABEL=lazytainer # value of lazytainer.marker for other containers that lazytainer checks - - TIMEOUT=30 # OPTIONAL number of seconds to let container idle - - MINPACKETTHRESH=10 # OPTIONAL number of packets that must be recieved to keepalive/start container - - POLLRATE=1 # OPTIONAL number of seconds to sleep between polls - - VERBOSE=true # probably set this to false unless you're debugging or doing the initial demo - - INTERFACE=eth0 # OPTIONAL interface to listen on - use eth0 in most cases + - VERBOSE=true # probably set this to false unless you're debugging or doing the initial demo ports: - 81:81 - 82:82 volumes: - /var/run/docker.sock:/var/run/docker.sock:ro + labels: + # this configuration will cause traffic to whoami1 to start whoami2, but traffic to only whoami2 will not wake whoami1 + # if there's no incoming traffic on port 81, pause whoami1 + - "lazytainer.group.group1.pollRate=1" + - "lazytainer.group.group1.inactiveTimeout=10" + - "lazytainer.group.group1.ports=81" + - "lazytainer.group.group1.sleepMethod=pause" # can be either "stop" or "pause", or left blank for stop + # if there's no incoming traffic on port 81 OR 82, pause whoami2 + - "lazytainer.group.group2.pollRate=1" + - "lazytainer.group.group2.inactiveTimeout=10" + - "lazytainer.group.group2.ports=81,82" + - "lazytainer.group.group2.sleepMethod=stop" # can be either "stop" or "pause", or left blank for stop whoami1: container_name: whoami1 image: containous/whoami - command: --port 81 + command: --port 81 # This is specific to containous/whoami network_mode: service:lazytainer depends_on: - lazytainer labels: - - "lazytainer.marker=lazytainer" - - "lazytainer.sleepMethod=pause" # can be either "stop" or "pause", or left blank for stop + - "lazytainer.group=group1" whoami2: container_name: whoami2 image: containous/whoami - command: --port 82 + command: --port 82 # This is specific to containous/whoami network_mode: service:lazytainer depends_on: - lazytainer labels: - - "lazytainer.marker=lazytainer" - - "lazytainer.sleepMethod=stop" # can be either "stop" or "pause", or left blank for stop + - "lazytainer.group=group2" diff --git a/minecraft_example/README.md b/examples/minecraft/README.md similarity index 92% rename from minecraft_example/README.md rename to examples/minecraft/README.md index ac69ca6..4b6ddc9 100644 --- a/minecraft_example/README.md +++ b/examples/minecraft/README.md @@ -2,7 +2,7 @@ ## Startup ``` git clone https://github.com/vmorganp/Lazytainer -cd Lazytainer/minecraft_example +cd Lazytainer/examples/minecraft docker-compose up ``` diff --git a/examples/minecraft/docker-compose.yaml b/examples/minecraft/docker-compose.yaml new file mode 100644 index 0000000..5963159 --- /dev/null +++ b/examples/minecraft/docker-compose.yaml @@ -0,0 +1,30 @@ +version: "3" +services: + lazytainer: + container_name: lazytainer + image: 
ghcr.io/vmorganp/lazytainer:master + environment: + - VERBOSE=true + ports: + - 25565:25565 + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + labels: + - "lazytainer.group.minecraft.sleepMethod=stop" + - "lazytainer.group.minecraft.ports=25565" + - "lazytainer.group.minecraft.inactiveTimeout=600" # 10 minutes, to allow the server to bootstrap. You can probably make this lower later if you want. + mc: + image: itzg/minecraft-server:java8-multiarch + network_mode: service:lazytainer + environment: + EULA: "TRUE" + TYPE: VANILLA + VERSION: 1.19 + MEMORY: 6G + restart: unless-stopped + labels: + - "lazytainer.group=minecraft" + depends_on: + - lazytainer + volumes: + - /tmp/lazytainerExample/minecraft:/data \ No newline at end of file diff --git a/zerotier_example/README.md b/examples/zerotier/README.md similarity index 100% rename from zerotier_example/README.md rename to examples/zerotier/README.md diff --git a/examples/zerotier/docker-compose.yml b/examples/zerotier/docker-compose.yml new file mode 100644 index 0000000..cc1c5e4 --- /dev/null +++ b/examples/zerotier/docker-compose.yml @@ -0,0 +1,43 @@ +version: "3" +services: + lazytainer: + container_name: lazytainer + image: ghcr.io/vmorganp/lazytainer:master + environment: + - VERBOSE=true # probably set this to false unless you're debugging or doing the initial demo + ports: + - 81:81 + - 82:82 + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + labels: + # if there's no incoming traffic on port 81, stop whoami1 + - "lazytainer.group.group1.pollRate=1" + - "lazytainer.group.group1.inactiveTimeout=30" + - "lazytainer.group.group1.ports=81" + - "lazytainer.group.group1.MINPACKETTHRESH=10" + - "lazytainer.group.group1.sleepMethod=stop" # can be either "stop" or "pause", or left blank for stop + - "lazytainer.group.group1.netInterface=ztukuxxqii" + + zerotier: + image: zyclonite/zerotier + container_name: zerotier + network_mode: "service:lazytainer" + devices: + - /dev/net/tun + volumes: + - './zt:/var/lib/zerotier-one' + cap_add: + - NET_ADMIN + - SYS_ADMIN + + whoami1: + container_name: whoami1 + image: containous/whoami + command: --port 81 + network_mode: service:lazytainer + depends_on: + - lazytainer + labels: + - "lazytainer.group=group1" + diff --git a/lazytainer.go b/lazytainer.go deleted file mode 100644 index 2839a98..0000000 --- a/lazytainer.go +++ /dev/null @@ -1,253 +0,0 @@ -package main - -import ( - "context" - "fmt" - "math" - "os" - "strconv" - "strings" - "time" - - "github.com/cakturk/go-netstat/netstat" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/client" -) - -var ( - label string - portsArray []string - - ports string - inactiveTimeout int - minPacketThreshold int - pollRate int - verbose bool - listenInterface string -) - -func main() { - setVarsFromEnv() - inactiveSeconds := 0 - rxHistory := make([]int, int(math.Ceil(float64(inactiveTimeout/pollRate)))) - sleepTime := time.Duration(pollRate) * time.Second - for { - rxHistory = append(rxHistory[1:], getRxPackets()) - if rxHistory[0] > rxHistory[len(rxHistory)-1] { - rxHistory = make([]int, int(math.Ceil(float64(inactiveTimeout/pollRate)))) - if verbose { - fmt.Println("rx packets overflowed and reset") - } - } - // if the container is running, see if it needs to be stopped - if isContainerOn() { - if verbose { - fmt.Println(rxHistory[len(rxHistory)-1]-rxHistory[0], "packets received in the last", inactiveTimeout, "seconds") - } - // if no clients are active on ports and 
threshold packets haven't been received in TIMEOUT secs - if getActiveClients() == 0 && rxHistory[0]+minPacketThreshold > rxHistory[len(rxHistory)-1] { - // count up if no active clients - inactiveSeconds = inactiveSeconds + pollRate - fmt.Println(inactiveSeconds, "/", inactiveTimeout, "seconds without an active client or sufficient traffic on running container") - if inactiveSeconds >= inactiveTimeout { - stopContainers() - } - } else { - inactiveSeconds = 0 - } - } else { - // if more than THRESHOLD rx in last RXHISTSECONDS seconds, start the container - if rxHistory[0]+minPacketThreshold < rxHistory[len(rxHistory)-1] { - inactiveSeconds = 0 - startContainers() - } else { - if verbose { - fmt.Println(rxHistory[len(rxHistory)-1], "received out of", rxHistory[0]+minPacketThreshold, "packets needed to restart container") - } - } - } - time.Sleep(sleepTime) - if verbose { - fmt.Println("//////////////////////////////////////////////////////////////////////////////////") - } - } -} - -func setVarsFromEnv() { - label = os.Getenv("LABEL") - if label == "" { - panic("you must set env variable LABEL") - } - - portsCSV := os.Getenv("PORT") - if portsCSV == "" { - panic("you must set env variable PORT") - } - - // ports to check for active connections - portsArray = strings.Split(string(strings.TrimSpace(string(portsCSV))), ",") - - // logging level, should probably use a lib for this - verboseString := os.Getenv("VERBOSE") - if strings.ToLower(verboseString) == "true" { - verbose = true - } - - var err error - - // how long a container is allowed to have no traffic before being stopped - inactiveTimeout, err = strconv.Atoi(os.Getenv("TIMEOUT")) - if err != nil { - if strings.Contains(err.Error(), "strconv.Atoi: parsing \"\": invalid syntax") { - fmt.Println("using default 60 because env variable TIMEOUT not set ") - inactiveTimeout = 60 - } else { - panic(err) - } - } - - // number of packets required between first and last poll to keep container alive - minPacketThreshold, err = strconv.Atoi(os.Getenv("MINPACKETTHRESH")) - if err != nil { - if strings.Contains(err.Error(), "strconv.Atoi: parsing \"\": invalid syntax") { - fmt.Println("using default 10 because env variable MINPACKETTHRESH not set ") - minPacketThreshold = 10 - } else { - panic(err) - } - } - - // how many seconds to wait in between polls - pollRate, err = strconv.Atoi(os.Getenv("POLLRATE")) - if err != nil { - if strings.Contains(err.Error(), "strconv.Atoi: parsing \"\": invalid syntax") { - fmt.Println("using default 5 because env variable POLLRATE not set ") - pollRate = 5 - } else { - panic(err) - } - } - - listenInterface = os.Getenv("INTERFACE") - if listenInterface == "" { - fmt.Println("using default eth0 because env variable INTERFACE not set ") - listenInterface = "eth0" - } -} - -func getRxPackets() int { - // get rx packets outside of the if bc we do it either way - rx, err := os.ReadFile("/sys/class/net/" + listenInterface + "/statistics/rx_packets") - check(err) - rxPackets, err := strconv.Atoi(strings.TrimSpace(string(rx))) - check(err) - if verbose { - fmt.Println(rxPackets, "rx packets") - } - return rxPackets -} - -func getActiveClients() int { - // get active clients - var allSocks []netstat.SockTabEntry - udpSocks, err := netstat.UDPSocks(netstat.NoopFilter) - check(err) - udp6Socks, err := netstat.UDP6Socks(netstat.NoopFilter) - check(err) - tcpSocks, err := netstat.TCPSocks(netstat.NoopFilter) - check(err) - tcp6Socks, err := netstat.TCP6Socks(netstat.NoopFilter) - check(err) - - activeClients := 0 - for _, 
socketEntry := range append(append(append(append(allSocks, udp6Socks...), tcp6Socks...), tcpSocks...), udpSocks...) { - if socketEntry.State.String() == "ESTABLISHED" { - for _, aPort := range portsArray { - if aPort == fmt.Sprintf("%v", socketEntry.LocalAddr.Port) { - activeClients++ - } - } - } - } - if verbose { - fmt.Println(activeClients, "active clients") - } - return activeClients -} - -func getContainers() []types.Container { - if label == "" { - panic("environment variable LABEL must be set") - } - - dockerClient, err := client.NewClientWithOpts(client.FromEnv) - check(err) - filter := filters.NewArgs(filters.Arg("label", "lazytainer.marker="+label)) - containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter}) - check(err) - - // for _, container := range containers { - // fmt.Printf("%s %s %s\n", container.ID[:10], container.Image, container.State) - // } - return containers -} - -func isContainerOn() bool { - for _, c := range getContainers() { - if c.State == "running" { - return true - } - } - return false -} - -func stopContainers() { - fmt.Println("stopping container(s)") - dockerClient, err := client.NewClientWithOpts(client.FromEnv) - check(err) - for _, c := range getContainers() { - stopMethod := strings.ToLower(c.Labels["lazytainer.sleepMethod"]) - if stopMethod == "stop" || stopMethod == "" { - if err := dockerClient.ContainerStop(context.Background(), c.ID, nil); err != nil { - fmt.Printf("Unable to stop container %s: %s\n", c.Names[0], err) - } else { - fmt.Println("stopped container ", c.Names[0]) - } - } else if stopMethod == "pause" { - if err := dockerClient.ContainerPause(context.Background(), c.ID); err != nil { - fmt.Printf("Unable to pause container %s: %s\n", c.Names[0], err) - } else { - fmt.Println("paused container ", c.Names[0]) - } - } - } -} - -func startContainers() { - fmt.Println("starting container(s)") - dockerClient, err := client.NewClientWithOpts(client.FromEnv) - check(err) - for _, c := range getContainers() { - stopMethod := strings.ToLower(c.Labels["lazytainer.sleepMethod"]) - if stopMethod == "stop" || stopMethod == "" { - if err := dockerClient.ContainerStart(context.Background(), c.ID, types.ContainerStartOptions{}); err != nil { - fmt.Printf("Unable to start container %s: %s\n", c.Names[0], err) - } else { - fmt.Println("started container ", c.Names[0]) - } - } else if stopMethod == "pause" { - if err := dockerClient.ContainerUnpause(context.Background(), c.ID); err != nil { - fmt.Printf("Unable to unpause container %s: %s\n", c.Names[0], err) - } else { - fmt.Println("unpaused container ", c.Names[0]) - } - } - } -} - -func check(err error) { - if err != nil { - fmt.Println(err) - } -} diff --git a/minecraft_example/docker-compose.yaml b/minecraft_example/docker-compose.yaml deleted file mode 100644 index 3f2db4f..0000000 --- a/minecraft_example/docker-compose.yaml +++ /dev/null @@ -1,32 +0,0 @@ -version: "3" -services: - lazytainer: - container_name: lazytainer - image: ghcr.io/vmorganp/lazytainer:master - environment: - - PORT=25565 - - LABEL=lazytainer - - TIMEOUT=900 - ports: - - 25565:25565 - volumes: - - /var/run/docker.sock:/var/run/docker.sock:ro - mc: - image: itzg/minecraft-server:java8-multiarch - network_mode: service:lazytainer - environment: - EULA: "TRUE" - TYPE: FTBA - FTB_MODPACK_ID: 79 # direwolf20 1.16 - # MEMORYSIZE: 15G - # MEMORY: 15G - restart: unless-stopped - labels: - - "lazytainer.marker=lazytainer" - - "lazytainer.sleepMethod=stop" # can be 
either "stop" or "pause", or left blank for stop - # using "stop" will release the memory associated with the server - # using "pause" will keep the memory allocated, however it will restart much faster - depends_on: - - lazytainer - volumes: - - /home/temp/mc:/data \ No newline at end of file diff --git a/go.mod b/src/go.mod similarity index 96% rename from go.mod rename to src/go.mod index 1d1527c..b0c4449 100644 --- a/go.mod +++ b/src/go.mod @@ -11,6 +11,7 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/go-cmp v0.5.6 // indirect + github.com/google/gopacket v1.1.19 github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect diff --git a/go.sum b/src/go.sum similarity index 94% rename from go.sum rename to src/go.sum index d651706..1bb1fcd 100644 --- a/go.sum +++ b/src/go.sum @@ -22,6 +22,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= @@ -50,6 +52,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -82,6 +86,7 @@ golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/src/group.go b/src/group.go new file mode 100644 index 0000000..002328f --- /dev/null +++ b/src/group.go @@ -0,0 +1,184 @@ +package main + +import ( + "context" + "fmt" + "math" + "strconv" + "time" + + "github.com/cakturk/go-netstat/netstat" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" + + "github.com/google/gopacket" + _ "github.com/google/gopacket/layers" + "github.com/google/gopacket/pcap" +) + +type LazyGroup struct { + groupName string // the docker label associated with this group + inactiveTimeout uint16 // how many seconds of inactivity before group turns off + minPacketThreshold uint16 // minimum network traffic in packets before stop/pause occurs + netInterface string // which network interface to watch traffic on. By default this is eth0 but can sometimes vary + pollRate uint16 // how frequently to poll traffic statistics + ports []uint16 // list of ports, which happens to also be a 16 bit range, how convenient! + stopMethod string // whether to stop or pause the container +} + +func (lg LazyGroup) MainLoop() { + // rxPacketCount is continiously updated by the getRxPackets goroutine + var rxPacketCount int + go lg.getRxPackets(&rxPacketCount) + + inactiveSeconds := 0 + // initialize a slice to keep track of recnt network traffic + rxHistory := make([]int, int(math.Ceil(float64(lg.inactiveTimeout/lg.pollRate)))) + sleepTime := time.Duration(lg.pollRate) * time.Second + for { + rxHistory = append(rxHistory[1:], rxPacketCount) + if rxHistory[0] > rxHistory[len(rxHistory)-1] { + rxHistory = make([]int, int(math.Ceil(float64(lg.inactiveTimeout/lg.pollRate)))) + debugLogger.Println("rx packets overflowed and reset") + } + // if the container is running, see if it needs to be stopped + if lg.isGroupOn() { + debugLogger.Println(rxHistory[len(rxHistory)-1]-rxHistory[0], "packets received in the last", lg.inactiveTimeout, "seconds") + // if no clients are active on ports and threshold packets haven't been received in TIMEOUT secs + if lg.getActiveClients() == 0 && rxHistory[0]+int(lg.minPacketThreshold) > rxHistory[len(rxHistory)-1] { + // count up if no active clients + inactiveSeconds = inactiveSeconds + int(lg.pollRate) + fmt.Println(inactiveSeconds, "/", lg.inactiveTimeout, "seconds without an active client or sufficient traffic on running container") + if inactiveSeconds >= int(lg.inactiveTimeout) { + lg.stopContainers() + } + } else { + inactiveSeconds = 0 + } + } else { + // if more than THRESHOLD rx in last RXHISTSECONDS seconds, start the container + if rxHistory[0]+int(lg.minPacketThreshold) < rxHistory[len(rxHistory)-1] { + inactiveSeconds = 0 + lg.startContainers() + } else { + debugLogger.Println(rxHistory[len(rxHistory)-1], "received out of", rxHistory[0]+int(lg.minPacketThreshold), "packets needed to restart container") + } + } + time.Sleep(sleepTime) + debugLogger.Println("/////////////////////////////////////////////////////////////////////////") + } +} + +func (lg LazyGroup) getContainers() []types.Container { + dockerClient, err := client.NewClientWithOpts(client.FromEnv) + check(err) + filter := filters.NewArgs(filters.Arg("label", "lazytainer.group="+lg.groupName)) + containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter}) + check(err) + + return containers +} + +func (lg LazyGroup) stopContainers() { + debugLogger.Println("stopping container(s)") + dockerClient, 
err := client.NewClientWithOpts(client.FromEnv) + check(err) + for _, c := range lg.getContainers() { + if lg.stopMethod == "stop" || lg.stopMethod == "" { + if err := dockerClient.ContainerStop(context.Background(), c.ID, nil); err != nil { + fmt.Printf("ERROR: Unable to stop container %s: %s\n", c.Names[0], err) + } else { + infoLogger.Println("stopped container ", c.Names[0]) + } + } else if lg.stopMethod == "pause" { + if err := dockerClient.ContainerPause(context.Background(), c.ID); err != nil { + fmt.Printf("ERROR: Unable to pause container %s: %s\n", c.Names[0], err) + } else { + infoLogger.Println("paused container ", c.Names[0]) + } + } + } +} + +func (lg LazyGroup) startContainers() { + debugLogger.Println("starting container(s)") + dockerClient, err := client.NewClientWithOpts(client.FromEnv) + check(err) + for _, c := range lg.getContainers() { + if lg.stopMethod == "stop" || lg.stopMethod == "" { + if err := dockerClient.ContainerStart(context.Background(), c.ID, types.ContainerStartOptions{}); err != nil { + fmt.Printf("ERROR: Unable to start container %s: %s\n", c.Names[0], err) + } else { + infoLogger.Println("started container ", c.Names[0]) + } + } else if lg.stopMethod == "pause" { + if err := dockerClient.ContainerUnpause(context.Background(), c.ID); err != nil { + fmt.Printf("ERROR: Unable to unpause container %s: %s\n", c.Names[0], err) + } else { + infoLogger.Println("unpaused container ", c.Names[0]) + } + } + } +} + +func (lg LazyGroup) getRxPackets(packetCount *int) { + handle, err := pcap.OpenLive(lg.netInterface, 1600, true, + pcap.BlockForever) // TODO I have no idea if 1600 is a "good" number. It's what's in the example in the docs though + check(err) + defer handle.Close() + + // configure filter based on passed ports, looks like "port 80 or port 81 or etc..." + var filter string + for _, v := range lg.ports { + filter += "port " + strconv.Itoa(int(v)) + " or " + } + filter = filter[0 : len(filter)-4] + + if err := handle.SetBPFFilter(filter); err != nil { + panic(err) + } + + packetSource := gopacket.NewPacketSource(handle, handle.LinkType()) + for range packetSource.Packets() { + // At some point this wraps around I think. + // I have no idea when that point is or what the consequences of letting it happen are so I'm forcing it to be 1m + *packetCount = (*packetCount + 1) % 1000000 + debugLogger.Println("group", lg.groupName, "recieved", *packetCount, "packets") + } +} + +func (lg LazyGroup) getActiveClients() int { + // get active clients + var allSocks []netstat.SockTabEntry + udpSocks, err := netstat.UDPSocks(netstat.NoopFilter) + check(err) + udp6Socks, err := netstat.UDP6Socks(netstat.NoopFilter) + check(err) + tcpSocks, err := netstat.TCPSocks(netstat.NoopFilter) + check(err) + tcp6Socks, err := netstat.TCP6Socks(netstat.NoopFilter) + check(err) + + activeClients := 0 + for _, socketEntry := range append(append(append(append(allSocks, udp6Socks...), tcp6Socks...), tcpSocks...), udpSocks...) 
{ + if socketEntry.State.String() == "ESTABLISHED" { + for _, aPort := range lg.ports { + if strconv.FormatUint(uint64(aPort), 10) == fmt.Sprintf("%v", socketEntry.LocalAddr.Port) { + activeClients++ + } + } + } + } + debugLogger.Println(activeClients, "active clients") + return activeClients +} + +func (lg LazyGroup) isGroupOn() bool { + for _, c := range lg.getContainers() { + if c.State == "running" { + return true + } + } + return false +} diff --git a/src/lazytainer.go b/src/lazytainer.go new file mode 100644 index 0000000..9e30da7 --- /dev/null +++ b/src/lazytainer.go @@ -0,0 +1,159 @@ +package main + +import ( + "context" + "io/ioutil" + "log" + "os" + "strconv" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" +) + +// log := logger() +var infoLogger *log.Logger +var debugLogger *log.Logger + +func main() { + flags := log.LstdFlags | log.Lshortfile + infoLogger = log.New(os.Stdout, "INFO: ", flags) + debugLogger = log.New(os.Stdout, "DEBUG: ", flags) + + // if the verbose flag isn't set to true, don't log debug logs + verbose, verboseFlagSet := os.LookupEnv("VERBOSE") + if !verboseFlagSet || strings.ToLower(verbose) != "true" { + debugLogger.SetOutput(ioutil.Discard) + } + + // configure groups. eventually it might be nice to have file based config as well. + groups := configureFromLabels() + for _, v := range groups { + go v.MainLoop() + } + + // a caseless select functions as an infinite sleep. Using that here since the group loops are all that really matters from here on + select {} +} + +func configureFromLabels() map[string]LazyGroup { + // theoretically this could create an issue if people manually hostname their lazytainer instances the same + // for now the solution is "don't do that" + // we could do something clever to get around this, but not right now. + container_id, err := os.Hostname() + check(err) + + dockerClient, err := client.NewClientWithOpts(client.FromEnv) + check(err) + + filter := filters.NewArgs(filters.Arg("id", container_id)) + containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter}) + check(err) + + groups := make(map[string]LazyGroup) + labels := containers[0].Labels + + // iterate through labels, building out config for each group + prefix := "lazytainer.group." 
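+	// Group settings are read from labels of the form
+	// "lazytainer.group.<groupName>.<setting>"; splitting a label on "."
+	// puts the group name at index 2, and each group is only built once
+	// even when it appears in several labels.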
+ for label := range labels { + if strings.HasPrefix(label, prefix) { + splitLabelValue := strings.Split(label, ".") + groupName := splitLabelValue[2] + + // check map to see if group is already created + _, exists := groups[groupName] + if exists { + continue + } + + // required parameters + + // configure ports + var ports []uint16 + for _, v := range strings.Split(labels[prefix+groupName+".ports"], ",") { + val, err := strconv.Atoi(v) + check(err) + ports = append(ports, uint16(val)) + } + + // optional parameters + + // configure inactiveTimeout + inactiveTimeout := uint16(30) + labelValueAsString, exists := labels[prefix+groupName+".inactiveTimeout"] + if exists { + val, err := strconv.Atoi(labelValueAsString) + check(err) + inactiveTimeout = uint16(val) + } else { + debugLogger.Println("Using default timeout of 60 because " + prefix + groupName + ".inactiveTimeout was not set") + } + + // configure minPacketThreshold + minPacketThreshold := uint16(30) + labelValueAsString, exists = labels[prefix+groupName+".minPacketThreshold"] + if exists { + val, err := strconv.Atoi(labelValueAsString) + check(err) + minPacketThreshold = uint16(val) + } else { + debugLogger.Println("Using default threshold of 30 because " + prefix + groupName + ".minPacketThreshold was not set") + } + + // configure netInterface + netInterface := "eth0" + labelValueAsString, exists = labels[prefix+groupName+".netInterface"] + if exists { + netInterface = labelValueAsString + } else { + debugLogger.Println("Using default netInterface of eth0 because " + prefix + groupName + ".netInterface was not set") + } + + // configure pollRate + pollRate := uint16(30) + labelValueAsString, exists = labels[prefix+groupName+".pollRate"] + if exists { + val, err := strconv.Atoi(labelValueAsString) + check(err) + pollRate = uint16(val) + } else { + debugLogger.Println("Using default pollRate of 30 because " + prefix + groupName + ".pollRate was not set") + } + + // configure stopMethod + stopMethod := "stop" + labelValueAsString, exists = labels[prefix+groupName+".stopMethod"] + if exists { + stopMethod = labelValueAsString + } else { + debugLogger.Println("Using default stopMethod of stop because " + prefix + groupName + ".stopMethod was not set") + } + + groups[groupName] = LazyGroup{ + groupName: groupName, + inactiveTimeout: inactiveTimeout, + minPacketThreshold: minPacketThreshold, + netInterface: netInterface, + pollRate: pollRate, + ports: ports, + stopMethod: stopMethod, + } + } + } + + for _, g := range groups { + debugLogger.Printf("%+v\n", g) + } + + return groups +} + +// general error handling +func check(err error) { + if err != nil { + // fmt.Println(err) + panic(err) + } +} diff --git a/zerotier_example/docker-compose.yml b/zerotier_example/docker-compose.yml deleted file mode 100644 index 41aa21d..0000000 --- a/zerotier_example/docker-compose.yml +++ /dev/null @@ -1,38 +0,0 @@ -version: "3" -services: - lazytainer: - container_name: lazytainer - image: ghcr.io/vmorganp/lazytainer:master - environment: - - PORT=81 # comma separated list of ports...or just the one - - LABEL=lazytainer # value of lazytainer.marker for other containers that lazytainer checks - - TIMEOUT=30 # OPTIONAL number of seconds to let container idle - - MINPACKETTHRESH=10 # OPTIONAL number of packets that must be recieved to keepalive/start container - - POLLRATE=1 # OPTIONAL number of seconds to sleep between polls - - VERBOSE=true # probably set this to false unless you're debugging or doing the initial demo - - INTERFACE=ztukuxxqii # Replace 
this with your ifconfig interface - ports: - - 81:81 # Can remove this section if you only want it accessible via zerotier - volumes: - - /var/run/docker.sock:/var/run/docker.sock:ro - zerotier: - image: zyclonite/zerotier - container_name: zerotier - network_mode: "service:lazytainer" - devices: - - /dev/net/tun - volumes: - - './zt:/var/lib/zerotier-one' - cap_add: - - NET_ADMIN - - SYS_ADMIN - whoami1: - container_name: whoami1 - image: containous/whoami - command: --port 81 - network_mode: service:lazytainer - depends_on: - - lazytainer - labels: - - "lazytainer.marker=lazytainer" - - "lazytainer.sleepMethod=pause" \ No newline at end of file