mirror of https://github.com/amir20/dozzle.git synced 2025-12-21 13:23:07 +01:00

feat!: implements swarm mode with agents (#3058)

This commit is contained in:
Amir Raminfar
2024-07-05 13:38:10 -07:00
committed by GitHub
parent 2e5fb71938
commit 4de9c775ba
70 changed files with 2681 additions and 963 deletions


@@ -6,3 +6,4 @@ dist
.git
e2e
docs
internal/agent/pb/


@@ -58,6 +58,10 @@ jobs:
run: pnpm install
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Writing certs to file
run: |
echo "${{ secrets.TTL_KEY }}" > shared_key.pem
echo "${{ secrets.TTL_CERT }}" > shared_cert.pem
- name: Build
uses: docker/bake-action@v5
with:
@@ -90,10 +94,17 @@ jobs:
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Checkout
uses: actions/checkout@v4
- name: Writing certs to file
run: |
echo "${{ secrets.TTL_KEY }}" > shared_key.pem
echo "${{ secrets.TTL_CERT }}" > shared_cert.pem
- name: Build and push
uses: docker/build-push-action@v6.3.0
with:
push: true
context: .
platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8
tags: ${{ steps.meta.outputs.tags }}
build-args: TAG=${{ steps.meta.outputs.version }}


@@ -24,9 +24,16 @@ jobs:
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Checkout
uses: actions/checkout@v4
- name: Writing certs to file
run: |
echo "${{ secrets.TTL_KEY }}" > shared_key.pem
echo "${{ secrets.TTL_CERT }}" > shared_cert.pem
- name: Build and push
uses: docker/build-push-action@v6.3.0
with:
context: .
push: true
platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8
tags: ${{ steps.meta.outputs.tags }}


@@ -58,8 +58,14 @@ jobs:
check-latest: true
- name: Checkout code
uses: actions/checkout@v4
- name: Install Protoc
uses: arduino/setup-protoc@v3
- name: Install gRPC and Go
run: |
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
- name: Run Go Tests with Coverage
-run: make test SKIP_ASSET=1
+run: make test
- name: Stactic checker
uses: dominikh/staticcheck-action@v1.3.1
with:
@@ -85,6 +91,10 @@ jobs:
run: pnpm install
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Writing certs to file
run: |
echo "${{ secrets.TTL_KEY }}" > shared_key.pem
echo "${{ secrets.TTL_CERT }}" > shared_cert.pem
- name: Build
uses: docker/bake-action@v5
with:

.gitignore (2 lines changed)

@@ -12,3 +12,5 @@ coverage.out
/test-results/
/playwright-report/
/playwright/.cache/
*.pem
*.csr

.reflex.agent (new file, 1 line)

@@ -0,0 +1 @@
-r '\.(go)$' -R 'node_modules' -G '\*\_test.go' -s -- go run -race main.go --level debug agent

.vscode/launch.json (new file, 16 lines)

@@ -0,0 +1,16 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Debug test",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${workspaceFolder}/main.go",
"args": ["test"]
}
]
}


@@ -1,7 +1,7 @@
{
"i18n-ally.localesPaths": ["locales"],
"i18n-ally.keystyle": "nested",
-"cSpell.words": ["healthcheck", "orderedmap"],
+"cSpell.words": ["healthcheck", "orderedmap", "stdcopy", "Warnf"],
"editor.formatOnSave": true,
"i18n-ally.extract.autoDetect": true
}


@@ -1,5 +1,5 @@
# Build assets
-FROM --platform=$BUILDPLATFORM node:22.4.0-alpine as node
+FROM --platform=$BUILDPLATFORM node:22.4.0-alpine AS node
RUN corepack enable
@@ -24,7 +24,11 @@ RUN pnpm build
FROM --platform=$BUILDPLATFORM golang:1.22.5-alpine AS builder
-RUN apk add --no-cache ca-certificates && mkdir /dozzle
+# install gRPC dependencies
+RUN apk add --no-cache ca-certificates protoc protobuf-dev\
+&& mkdir /dozzle \
+&& go install google.golang.org/protobuf/cmd/protoc-gen-go@latest \
+&& go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
WORKDIR /dozzle
@@ -32,17 +36,22 @@ WORKDIR /dozzle
COPY go.* ./
RUN go mod download
-# Copy assets built with node
-COPY --from=node /build/dist ./dist
# Copy all other files
COPY internal ./internal
COPY main.go ./
+COPY protos ./protos
+COPY shared_key.pem shared_cert.pem ./
+# Copy assets built with node
+COPY --from=node /build/dist ./dist
# Args
ARG TAG=dev
ARG TARGETOS TARGETARCH
+# Generate protos
+RUN go generate
# Build binary
RUN GOOS=$TARGETOS GOARCH=$TARGETARCH CGO_ENABLED=0 go build -ldflags "-s -w -X main.version=$TAG" -o dozzle
@@ -50,7 +59,6 @@ RUN mkdir /data
FROM scratch
-ENV PATH /bin
COPY --from=builder /data /data
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=builder /dozzle/dozzle /dozzle
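The `RUN go generate` step relies on a `go:generate` directive in the Go sources; the directive itself is not part of this diff. A purely hypothetical sketch of what it could look like, assuming a single proto file under `protos/` and the `internal/agent/pb` output directory used by the Makefile below:

```go
// Hypothetical example only: the real source file, proto file name, and flags are not shown in this commit.
// Placed in a Go source file (e.g. main.go), `go generate` would invoke protoc with the
// protoc-gen-go and protoc-gen-go-grpc plugins installed earlier in the builder stage.
//go:generate protoc --proto_path=protos --go_out=internal/agent/pb --go_opt=paths=source_relative --go-grpc_out=internal/agent/pb --go-grpc_opt=paths=source_relative agent.proto
```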


@@ -1,7 +1,14 @@
PROTO_DIR := protos
GEN_DIR := internal/agent/pb
PROTO_FILES := $(wildcard $(PROTO_DIR)/*.proto)
GEN_FILES := $(patsubst $(PROTO_DIR)/%.proto,$(GEN_DIR)/%.pb.go,$(PROTO_FILES))
.PHONY: clean
clean:
@rm -rf dist
@go clean -i
@rm -f shared_key.pem shared_cert.pem
@rm -f $(GEN_DIR)/*.pb.go
.PHONY: dist
dist:
@@ -14,17 +21,19 @@ fake_assets:
@echo "assets build was skipped" > dist/index.html
.PHONY: test
-test: fake_assets
+test: fake_assets generate
-go test -cover -race ./...
+go test -cover -race -count 1 -timeout 5s ./...
.PHONY: build
-build: dist
+build: dist generate
CGO_ENABLED=0 go build -ldflags "-s -w"
.PHONY: docker
-docker:
+docker: shared_key.pem shared_cert.pem
@docker build -t amir20/dozzle .
generate: shared_key.pem shared_cert.pem $(GEN_FILES)
.PHONY: dev
dev:
pnpm dev
@@ -32,3 +41,19 @@ dev:
.PHONY: int
int:
docker compose up --build --force-recreate --exit-code-from playwright
shared_key.pem:
@openssl genpkey -algorithm RSA -out shared_key.pem -pkeyopt rsa_keygen_bits:2048
shared_cert.pem:
@openssl req -new -key shared_key.pem -out shared_request.csr -subj "/C=US/ST=California/L=San Francisco/O=Dozzle"
@openssl x509 -req -in shared_request.csr -signkey shared_key.pem -out shared_cert.pem -days 365
@rm shared_request.csr
$(GEN_DIR)/%.pb.go: $(PROTO_DIR)/%.proto
@go generate
.PHONY: push
push: docker
@docker tag amir20/dozzle:latest amir20/dozzle:agent
@docker push amir20/dozzle:agent
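Since the key and certificate above are generated locally and self-signed, a quick sanity check with standard OpenSSL commands (not part of the Makefile, just an illustration) looks like this:

```sh
# Show the subject and validity window of the generated certificate
openssl x509 -in shared_cert.pem -noout -subject -dates

# Verify the private key and the certificate carry the same public key
openssl pkey -in shared_key.pem -pubout | openssl sha256
openssl x509 -in shared_cert.pem -pubkey -noout | openssl sha256
```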


@@ -219,18 +219,6 @@ function useLogStream(url: Ref<string>, loadMoreUrl?: Ref<string>) {
}
}
-// TODO this is a hack to connect the event source when the container is started
-// watch(
-// () => container.value.state,
-// (newValue, oldValue) => {
-// console.log("LogEventSource: container changed", newValue, oldValue);
-// if (newValue == "running" && newValue != oldValue) {
-// buffer.push(new DockerEventLogEntry("Container started", new Date(), "container-started"));
-// connect({ clear: false });
-// }
-// },
-// );
onScopeDispose(() => close());
return { ...$$({ messages }), loadOlderLogs };


@@ -79,12 +79,14 @@ export class Container {
get name() {
return this.isSwarm
-? this.labels["com.docker.swarm.task.name"].replace(`.${this.labels["com.docker.swarm.task.id"]}`, "")
+? this.labels["com.docker.swarm.task.name"]
+.replace(`.${this.labels["com.docker.swarm.task.id"]}`, "")
+.replace(`.${this.labels["com.docker.swarm.node.id"]}`, "")
: this._name;
}
get swarmId() {
-return this.labels["com.docker.swarm.service.id"];
+return this.labels["com.docker.swarm.task.name"].replace(this.name + ".", "");
}
get isSwarm() {
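For illustration, here is how the two getters above resolve for a hypothetical swarm task; the label values are made up, and the snippet only mirrors the logic shown in the diff:

```ts
// Hypothetical swarm labels for one task of a service replica.
const labels: Record<string, string> = {
  "com.docker.swarm.task.name": "app_web.1.abc123",
  "com.docker.swarm.task.id": "abc123",
  "com.docker.swarm.node.id": "node456",
};

// Mirrors `get name()`: strip the task id and node id suffixes from the task name.
const name = labels["com.docker.swarm.task.name"]
  .replace(`.${labels["com.docker.swarm.task.id"]}`, "")
  .replace(`.${labels["com.docker.swarm.node.id"]}`, "");

// Mirrors `get swarmId()`: whatever remains after the display name is the id suffix.
const swarmId = labels["com.docker.swarm.task.name"].replace(name + ".", "");

console.log(name); // "app_web.1"
console.log(swarmId); // "abc123"
```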


@@ -7,6 +7,7 @@ services:
- DOZZLE_FILTER=name=custom_base
- DOZZLE_BASE=/foobarbase
- DOZZLE_NO_ANALYTICS=1
- DOZZLE_HOSTNAME=custom name
ports:
- 8080:8080
build:
@@ -28,6 +29,7 @@ services:
environment:
- DOZZLE_FILTER=name=dozzle
- DOZZLE_NO_ANALYTICS=1
- DOZZLE_HOSTNAME=localhost
ports:
- 7070:8080
build:
@@ -35,7 +37,7 @@ services:
remote:
container_name: remote
environment:
-- DOZZLE_REMOTE_HOST=tcp://proxy:2375
+- DOZZLE_REMOTE_HOST=tcp://proxy:2375|remote-host
- DOZZLE_FILTER=name=dozzle
- DOZZLE_NO_ANALYTICS=1
ports:
@@ -45,7 +47,35 @@ services:
depends_on:
proxy:
condition: service_healthy
dozzle-with-agent:
container_name: with-agent
environment:
- DOZZLE_REMOTE_AGENT=agent:7007
- DOZZLE_NO_ANALYTICS=1
- DOZZLE_LEVEL=debug
ports:
- 8082:8080
build:
context: .
depends_on:
agent:
condition: service_healthy
agent:
container_name: agent
command: agent
environment:
- DOZZLE_FILTER=name=dozzle
- DOZZLE_NO_ANALYTICS=1
healthcheck:
test: ["CMD", "/dozzle", "healthcheck"]
interval: 5s
retries: 5
start_period: 5s
start_interval: 5s
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
build:
context: .
proxy:
container_name: proxy
image: tecnativa/docker-socket-proxy


@@ -62,16 +62,17 @@ export default defineConfig({
text: "Advanced Configuration", text: "Advanced Configuration",
items: [ items: [
{ text: "Authentication", link: "/guide/authentication" }, { text: "Authentication", link: "/guide/authentication" },
{ text: "Swarm Mode", link: "/guide/swarm-mode" }, { text: "Actions", link: "/guide/actions" },
{ text: "Agent Mode", link: "/guide/agent" },
{ text: "Changing Base", link: "/guide/changing-base" }, { text: "Changing Base", link: "/guide/changing-base" },
{ text: "Healthcheck", link: "/guide/healthcheck" }, { text: "Data Analytics", link: "/guide/analytics" },
{ text: "Hostname", link: "/guide/hostname" }, { text: "Display Name", link: "/guide/hostname" },
{ text: "Remote Hosts", link: "/guide/remote-hosts" },
{ text: "Container Actions", link: "/guide/actions" },
{ text: "Filters", link: "/guide/filters" }, { text: "Filters", link: "/guide/filters" },
{ text: "Healthcheck", link: "/guide/healthcheck" },
{ text: "Remote Hosts", link: "/guide/remote-hosts" },
{ text: "Swarm Mode", link: "/guide/swarm-mode" },
{ text: "Supported Env Vars", link: "/guide/supported-env-vars" }, { text: "Supported Env Vars", link: "/guide/supported-env-vars" },
{ text: "Logging Files on Disk", link: "/guide/log-files-on-disk" }, { text: "Logging Files on Disk", link: "/guide/log-files-on-disk" },
{ text: "Analytics", link: "/guide/analytics" },
], ],
}, },
{ {


@@ -1,6 +1,7 @@
// https://vitepress.dev/guide/custom-theme
import { h } from "vue";
-import Theme from "vitepress/theme";
+import DefaultTheme from "vitepress/theme";
import "@fontsource-variable/playfair-display";
import "./style.css";
import HeroVideo from "./components/HeroVideo.vue";
@@ -8,13 +9,15 @@ import BuyMeCoffee from "./components/BuyMeCoffee.vue";
import Stats from "./components/Stats.vue";
export default {
-...Theme,
+...DefaultTheme,
Layout: () => {
-return h(Theme.Layout, null, {
+return h(DefaultTheme.Layout, null, {
"home-hero-image": () => h(HeroVideo),
"sidebar-nav-after": () => h(BuyMeCoffee),
"home-hero-actions-after": () => h(Stats),
});
},
-enhanceApp({ app, router, siteData }) {},
+enhanceApp(ctx) {
+DefaultTheme.enhanceApp(ctx);
+},
};

docs/guide/agent.md (new file, 112 lines)

@@ -0,0 +1,112 @@
---
title: Agent Mode
---
# Agent Mode <Badge type="tip" text="Beta" />
Dozzle can run in agent mode, which exposes a Docker host to other Dozzle instances. All communication is done over a secure connection using TLS. This means that you can deploy Dozzle on a remote host and connect to it from your local machine.
## How to create an agent?
To create a Dozzle agent, you need to run Dozzle with the `agent` subcommand. Here is an example:
::: code-group
```sh
docker run -v /var/run/docker.sock:/var/run/docker.sock -p 7007:7007 amir20/dozzle:agent agent
```
```yaml [docker-compose.yml]
services:
dozzle-agent:
image: amir20/dozzle:latest
command: agent
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
ports:
- 7007:7007
```
:::
The agent will start and listen on port `7007`. You can connect to the agent using the Dozzle UI by providing the agent's IP address and port. The agent will only show the containers that are available on the host where the agent is running.
> [!TIP]
> You don't need to expose port 7007 if the agent and the Dozzle UI share a Docker network. The agent will be available to other containers on the same network, as shown in the sketch below.
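Following that tip, a minimal sketch of a single compose file where the UI reaches the agent over the shared compose network without publishing port 7007 (connecting is covered in the next section; service names are just examples):

```yaml
services:
  dozzle:
    image: amir20/dozzle:latest
    environment:
      - DOZZLE_REMOTE_AGENT=agent:7007 # resolved over the compose network
    ports:
      - 8080:8080 # only the UI port is published
  agent:
    image: amir20/dozzle:latest
    command: agent
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    # no ports: section needed; the agent stays internal to the network
```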
## How to connect to an agent?
To connect to an agent, you need to provide the agent's IP address and port. Here is an example:
::: code-group
```sh
docker run -p 8080:8080 amir20/dozzle:latest --remote-agent agent-ip:7007
```
```yaml [docker-compose.yml]
services:
dozzle:
image: amir20/dozzle:latest
environment:
- DOZZLE_REMOTE_AGENT=agent:7007
ports:
- 8080:8080 # Dozzle UI port
```
:::
Note that when connecting remotely, you don't need to mount the local Docker socket. The UI will only show the containers that are available on the agent.
> [!TIP]
> You can connect to multiple agents by providing a comma-separated list of addresses in `DOZZLE_REMOTE_AGENT`. For example, `DOZZLE_REMOTE_AGENT=agent1:7007,agent2:7007`. A sketch of this setup follows below.
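A minimal compose sketch with two agents (the agent hostnames below are placeholders for whatever your agents actually use):

```yaml
services:
  dozzle:
    image: amir20/dozzle:latest
    environment:
      # Comma-separated list of agent addresses
      - DOZZLE_REMOTE_AGENT=agent1:7007,agent2:7007
    ports:
      - 8080:8080
```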
## Setting up healthcheck
You can set a healthcheck for the agent, similar to the healthcheck for the main Dozzle instance. When running in agent mode, the healthcheck verifies the agent's connection to Docker. If Docker is not reachable, the agent will be marked as unhealthy and will not be shown in the UI.
To set up healthcheck, use the `healthcheck` subcommand. Here is an example:
```yml
services:
dozzle-agent:
image: amir20/dozzle:latest
command: agent
healthcheck:
test: ["CMD", "/dozzle", "healthcheck"]
interval: 5s
retries: 5
start_period: 5s
start_interval: 5s
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
ports:
- 7007:7007
```
## Changing agent's name
Similar to a regular Dozzle instance, you can change the agent's name by providing the `DOZZLE_HOSTNAME` environment variable. Here is an example:
::: code-group
```sh
docker run -v /var/run/docker.sock:/var/run/docker.sock -p 7007:7007 amir20/dozzle:agent agent --hostname my-special-name
```
```yaml [docker-compose.yml]
services:
dozzle-agent:
image: amir20/dozzle:latest
command: agent
environment:
- DOZZLE_HOSTNAME=my-special-name
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
ports:
- 7007:7007
```
:::
This will change the agent's name to `my-special-name`, and it will be reflected in the UI when connecting to the agent.


@@ -65,7 +65,7 @@ users:
Dozzle uses [JWT](https://en.wikipedia.org/wiki/JSON_Web_Token) to generate tokens for authentication. This token is saved in a cookie.
-### Generating users.yml
+## Generating users.yml <Badge type="tip" text="v6.6.x" />
Starting with version `v6.6.x`, Dozzle has a builtin `generate` command to generate `users.yml`. Here is an example:


@@ -4,28 +4,49 @@ title: Getting Started
# Getting Started
-This section will help you to setup Dozzle locally. Dozzle can also be used to connect to remote hosts via `tcp://` and tls. See remote host if you want to connect to other hosts.
+Dozzle supports multiple ways to run the application. You can run it using Docker CLI, Docker Compose, or in Swarm. The following sections will guide you through the process of setting up Dozzle.
-## Using Docker CLI
+## Running with Docker <Badge type="tip" text="Updated" />
-The easiest way to setup Dozzle is to use the CLI and mount `docker.sock` file. This file is usually located at `/var/run/docker.sock` and can be mounted with the `--volume` flag. You also need to expose the port to view Dozzle. By default, Dozzle listens on port 8080, but you can change the external port using `-p`.
+The easiest way to setup Dozzle is to use the CLI and mount `docker.sock` file. This file is usually located at `/var/run/docker.sock` and can be mounted with the `--volume` flag. You also need to expose the port to view Dozzle. By default, Dozzle listens on port 8080, but you can change the external port using `-p`. You can also run using compose or as a service in Swarm.
+::: code-group
```sh
-docker run --detach --volume=/var/run/docker.sock:/var/run/docker.sock -p 8080:8080 amir20/dozzle
+docker run -d -v /var/run/docker.sock:/var/run/docker.sock -p 8080:8080 amir20/dozzle
```
-## Using Docker Compose
+```yaml [docker-compose.yml]
+# Run with docker compose up -d
-Docker compose makes it easier to configure Dozzle as part of an existing configuration.
-```yaml
-version: "3"
services:
dozzle:
-container_name: dozzle
image: amir20/dozzle:latest
volumes:
- /var/run/docker.sock:/var/run/docker.sock
ports:
-- 9999:8080
+- 8080:8080
```
```yaml [dozzle-stack.yml]
# Run with docker stack deploy -c dozzle-stack.yml <name>
services:
dozzle:
image: amir20/dozzle:latest
environment:
- DOZZLE_MODE=swarm
volumes:
- /var/run/docker.sock:/var/run/docker.sock
ports:
- 8080:8080
networks:
- dozzle
deploy:
mode: global
networks:
dozzle:
driver: overlay
```
:::
See [swarm mode](/guide/swarm-mode) for more information on running Dozzle in Swarm.


@@ -2,11 +2,16 @@
title: Remote Host Setup
---
-# Remote Host Setup
+# Remote Host Setup <Badge type="warning" text="Deprecated" />
-Dozzle supports connecting to multiple remote hosts via `tcp://` using TLS and non-secured connections. Dozzle will need to have appropriate certs mounted to use secured connection. `ssh://` is not supported because Dozzle docker image does not ship with any ssh clients.
+Dozzle supports connecting to remote Docker hosts. This is useful when running Dozzle in a container and you want to monitor a different Docker host.
-## Connecting to remote hosts
+However, with Dozzle agents, you can connect to remote hosts without exposing the Docker socket. See the [agent](/guide/agent) page for more information.
> [!WARNING]
> Remote hosts will soon be deprecated in favor of agents. Agents provide a more secure way to connect to remote hosts. See the [agent](/guide/agent) page for more information. If you want to keep using remote hosts, follow this discussion on [GitHub](/github.com/amir20/dozzle/issues/xxx).
## Connecting to remote hosts with TLS
Remote hosts can be configured with `--remote-host` or `DOZZLE_REMOTE_HOST`. All certs must be mounted to `/certs` directory. The `/certs` directory expects to have `/certs/{ca,cert,key}.pem` or `/certs/{host}/{ca,cert,key}.pem` in case of multiple hosts.
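As an illustration of that layout, the certs can be mounted read-only next to the remote host definition; the host name, TLS port, and local cert path below are placeholders:

```sh
# Assumes ./certs/remote-host/{ca,cert,key}.pem exist for a host named remote-host
docker run --detach \
  --volume "$(pwd)/certs:/certs:ro" \
  --env DOZZLE_REMOTE_HOST=tcp://remote-host:2376 \
  --publish 8080:8080 \
  amir20/dozzle:latest
```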


@@ -2,11 +2,40 @@
title: Swarm Mode
---
-# Introducing Swarm Mode
+# Swarm Mode <Badge text="New" type="tip" />
-Dozzle added "Swarm Mode" in version 7 which supports Docker [stacks](https://docs.docker.com/reference/cli/docker/stack/deploy/), [services](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/) and custom groups for joining logs together. Dozzle does not use Swarm API internally as it is limited. Dozzle implements its own grouping using swarm labels. Additionally, Dozzle merges stats for containers in a group. This means that you can see logs and stats for all containers in a group in one view. But it does mean that each host needs to be setup with Dozzle.
+Dozzle supports Docker Swarm Mode starting from version 8. When using Swarm Mode, Dozzle will automatically discover services and custom groups. Dozzle does not use Swarm API internally as it is [limited](https://github.com/moby/moby/issues/33183). Dozzle implements its own grouping using swarm labels. Additionally, Dozzle merges stats for containers in a group. This means that you can see logs and stats for all containers in a group in one view. But it does mean each host needs to be setup with Dozzle.
-Dozzle swarm mode is automatically enabled when services or customer groups are found. If you are not using services, you can still take advantage of Dozzle's grouping feature by adding a label to your containers.
+## How does it work?
When deployed in Swarm Mode, Dozzle will create a secured mesh network between all the nodes in the swarm. This network is used to communicate between the different Dozzle instances. The mesh network is created using [mTLS](https://www.cloudflare.com/learning/access-management/what-is-mutual-tls) with a private TLS certificate. This means that all communication between the different Dozzle instances is encrypted and safe to deploy anywhere.
Dozzle supports Docker [stacks](https://docs.docker.com/reference/cli/docker/stack/deploy/), [services](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/) and custom groups for joining logs together. `com.docker.stack.namespace` and `com.docker.compose.project` labels are used for grouping containers. For services, Dozzle uses the service name as the group name which is `com.docker.swarm.service.name`.
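For illustration, in a hypothetical stack deployed with `docker stack deploy -c myapp.yml myapp`, both replicas of the `web` service below would carry `com.docker.stack.namespace=myapp` and `com.docker.swarm.service.name=myapp_web`, so their logs and stats would be grouped under that service:

```yaml
# myapp.yml (example stack, not part of the Dozzle documentation)
services:
  web:
    image: nginx:alpine
    deploy:
      replicas: 2
```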
## How to enable Swarm Mode?
To deploy on every node in the swarm, you can use `mode: global`. This will deploy Dozzle on every node in the swarm. Here is an example using Docker Stack:
```yml
services:
dozzle:
image: amir20/dozzle:latest
environment:
- DOZZLE_MODE=swarm
volumes:
- /var/run/docker.sock:/var/run/docker.sock
ports:
- 8080:8080
networks:
- dozzle
deploy:
mode: global
networks:
dozzle:
driver: overlay
```
Note that the `DOZZLE_MODE` environment variable is set to `swarm`. This tells Dozzle to automatically discover other Dozzle instances in the swarm. The `overlay` network is used to create the mesh network between the different Dozzle instances.
## Custom Groups
@@ -30,14 +59,3 @@ services:
```
:::
-## Merging Logs and Stats
-Dozzle merges logs and stats for containers in a group. This means that you can see logs and stats for all containers in a group in one view. This is useful for applications that have multiple containers that work together. Dozzle will automatically find new containers in a group and add them to the view as they are started.
-> [!INFO]
-> Automatic discovery of new containers is only available for services and custom groups. If you using merging logs in host mode, only specific containers will be shown. You can still use custom groups to merge logs for containers in swarm mode.
-## Service Discovery
-Dozzle uses Docker API to discover services and custom groups. This means that Dozzle will automatically find new containers in a group and add them to the view as they are started. This is useful for applications that have multiple containers that work together. Labels that are used are `com.docker.stack.namespace` and `com.docker.compose.project` for grouping containers. For services, Dozzle uses the service name as the group name which is `com.docker.swarm.service.name`.

e2e/agent.ts (new file, 15 lines)

@@ -0,0 +1,15 @@
import { test, expect } from "@playwright/test";
test.beforeEach(async ({ page }) => {
await page.goto("http://remote:8080/");
});
test("has right title", async ({ page }) => {
await expect(page).toHaveTitle(/.* - Dozzle/);
});
test("select running container", async ({ page }) => {
await page.getByTestId("side-menu").getByRole("link", { name: "dozzle" }).click();
await expect(page).toHaveURL(/\/container/);
await expect(page.getByText("Accepting connections")).toBeVisible();
});


@@ -1,7 +1,7 @@
import { test, expect } from "@playwright/test";
test.beforeEach(async ({ page }) => {
-await page.goto("http://remote:8080/");
+await page.goto("http://dozzle-with-agent:8080/");
});
test("has right title", async ({ page }) => {

Two binary files changed (images, not shown). Sizes: 14 KiB → 14 KiB and 13 KiB → 14 KiB.

examples/docker.swarm.yml (new file, 18 lines)

@@ -0,0 +1,18 @@
services:
dozzle:
image: amir20/dozzle:agent
environment:
- DOZZLE_LEVEL=debug
- DOZZLE_MODE=swarm
volumes:
- /var/run/docker.sock:/var/run/docker.sock
ports:
- "8080:8080"
networks:
- dozzle
deploy:
mode: global
networks:
dozzle:
driver: overlay

go.mod (2 lines changed)

@@ -26,10 +26,12 @@ require (
require (
github.com/PuerkitoBio/goquery v1.9.2
github.com/cenkalti/backoff/v4 v4.3.0
github.com/go-chi/chi/v5 v5.1.0
github.com/go-chi/jwtauth/v5 v5.3.1
github.com/goccy/go-json v0.10.3
github.com/puzpuzpuz/xsync/v3 v3.2.0
github.com/samber/lo v1.43.0
github.com/wk8/go-ordered-map/v2 v2.1.8
github.com/yuin/goldmark v1.7.4
google.golang.org/grpc v1.65.0

go.sum (15 lines changed)

@@ -16,8 +16,8 @@ github.com/beme/abide v0.0.0-20190723115211-635a09831760 h1:FvTM5NSN5HYvfKpgL+8x
github.com/beme/abide v0.0.0-20190723115211-635a09831760/go.mod h1:6+8gCKsZnxzhGTmKRh4BSkLos9CbWRJNcrp55We4SqQ=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -28,8 +28,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnN
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v27.0.2+incompatible h1:mNhCtgXNV1fIRns102grG7rdzIsGGCq1OlOD0KunZos=
-github.com/docker/docker v27.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
@@ -102,6 +100,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/puzpuzpuz/xsync/v3 v3.2.0 h1:9AzuUeF88YC5bK8u2vEG1Fpvu4wgpM1wfPIExfaaDxQ=
github.com/puzpuzpuz/xsync/v3 v3.2.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
+github.com/samber/lo v1.43.0 h1:ts0VhPi8+ZQZFVLv/2Vkgt2Cds05FM2v3Enmv+YMBtg=
+github.com/samber/lo v1.43.0/go.mod h1:w7R6fO7h2lrnx/s0bWcZ55vXJI89p5UPM6+kyDL373E=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
@@ -208,12 +208,11 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
+google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
-google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
+google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
-google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
+google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=

internal/agent/.gitignore (new file, 1 line)

@@ -0,0 +1 @@
pb

internal/agent/client.go (new file, 370 lines)

@@ -0,0 +1,370 @@
package agent
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io"
"time"
"github.com/amir20/dozzle/internal/agent/pb"
"github.com/amir20/dozzle/internal/docker"
"github.com/amir20/dozzle/internal/utils"
log "github.com/sirupsen/logrus"
orderedmap "github.com/wk8/go-ordered-map/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
)
type Client struct {
client pb.AgentServiceClient
host docker.Host
}
func NewClient(endpoint string, certificates tls.Certificate, opts ...grpc.DialOption) (*Client, error) {
caCertPool := x509.NewCertPool()
c, err := x509.ParseCertificate(certificates.Certificate[0])
if err != nil {
log.Fatalf("failed to parse certificate: %v", err)
}
caCertPool.AddCert(c)
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{certificates},
RootCAs: caCertPool,
InsecureSkipVerify: true, // Set to true if the server's hostname does not match the certificate
}
// Create the gRPC transport credentials
creds := credentials.NewTLS(tlsConfig)
opts = append(opts, grpc.WithTransportCredentials(creds))
conn, err := grpc.NewClient(endpoint, opts...)
if err != nil {
log.Fatalf("failed to connect to server: %v", err)
}
client := pb.NewAgentServiceClient(conn)
info, err := client.HostInfo(context.Background(), &pb.HostInfoRequest{})
if err != nil {
return nil, err
}
return &Client{
client: client,
host: docker.Host{
ID: info.Host.Id,
Name: info.Host.Name,
NCPU: int(info.Host.CpuCores),
MemTotal: int64(info.Host.Memory),
Endpoint: endpoint,
},
}, nil
}
func rpcErrToErr(err error) error {
status, ok := status.FromError(err)
if !ok {
return err
}
if status.Code() == codes.Unknown && status.Message() == "EOF" {
return fmt.Errorf("found EOF while streaming logs: %w", io.EOF)
}
switch status.Code() {
case codes.Canceled:
return fmt.Errorf("canceled: %v with %w", status.Message(), context.Canceled)
case codes.DeadlineExceeded:
return fmt.Errorf("deadline exceeded: %v with %w", status.Message(), context.DeadlineExceeded)
case codes.Unknown:
return fmt.Errorf("unknown error: %v with %w", status.Message(), err)
default:
return fmt.Errorf("unknown error: %v with %w", status.Message(), err)
}
}
func (c *Client) LogsBetweenDates(ctx context.Context, containerID string, since time.Time, until time.Time, std docker.StdType) (<-chan *docker.LogEvent, error) {
stream, err := c.client.LogsBetweenDates(ctx, &pb.LogsBetweenDatesRequest{
ContainerId: containerID,
Since: timestamppb.New(since),
Until: timestamppb.New(until),
StreamTypes: int32(std),
})
if err != nil {
return nil, err
}
events := make(chan *docker.LogEvent)
go func() {
sendLogs(stream, events)
close(events)
}()
return events, nil
}
func (c *Client) StreamContainerLogs(ctx context.Context, containerID string, since time.Time, std docker.StdType, events chan<- *docker.LogEvent) error {
stream, err := c.client.StreamLogs(ctx, &pb.StreamLogsRequest{
ContainerId: containerID,
Since: timestamppb.New(since),
StreamTypes: int32(std),
})
if err != nil {
return err
}
return sendLogs(stream, events)
}
func sendLogs(stream pb.AgentService_StreamLogsClient, events chan<- *docker.LogEvent) error {
for {
resp, err := stream.Recv()
if err != nil {
return rpcErrToErr(err)
}
m, err := resp.Event.Message.UnmarshalNew()
if err != nil {
log.Fatalf("cannot unpack message %v", err)
}
var message any
switch m := m.(type) {
case *pb.SimpleMessage:
message = m.Message
case *pb.ComplexMessage:
message = jsonBytesToOrderedMap(m.Data)
default:
log.Fatalf("agent client: unknown type %T", m)
}
events <- &docker.LogEvent{
Id: resp.Event.Id,
ContainerID: resp.Event.ContainerId,
Message: message,
Timestamp: resp.Event.Timestamp.AsTime().Unix(),
Position: docker.LogPosition(resp.Event.Position),
Level: resp.Event.Level,
Stream: resp.Event.Stream,
}
}
}
func (c *Client) StreamRawBytes(ctx context.Context, containerID string, since time.Time, until time.Time, std docker.StdType) (io.ReadCloser, error) {
out, err := c.client.StreamRawBytes(context.Background(), &pb.StreamRawBytesRequest{
ContainerId: containerID,
Since: timestamppb.New(since),
Until: timestamppb.New(until),
StreamTypes: int32(std),
})
if err != nil {
return nil, err
}
r, w := io.Pipe()
go func() {
defer w.Close()
for {
resp, err := out.Recv()
err = rpcErrToErr(err)
if err != nil {
if err == io.EOF || err == context.Canceled {
return
} else {
log.Warnf("error while streaming raw bytes %v", err)
return
}
}
w.Write(resp.Data)
}
}()
return r, nil
}
func (c *Client) StreamStats(ctx context.Context, stats chan<- docker.ContainerStat) error {
stream, err := c.client.StreamStats(ctx, &pb.StreamStatsRequest{})
if err != nil {
return err
}
for {
resp, err := stream.Recv()
if err != nil {
return rpcErrToErr(err)
}
stats <- docker.ContainerStat{
CPUPercent: resp.Stat.CpuPercent,
MemoryPercent: resp.Stat.MemoryPercent,
MemoryUsage: resp.Stat.MemoryUsage,
ID: resp.Stat.Id,
}
}
}
func (c *Client) StreamEvents(ctx context.Context, events chan<- docker.ContainerEvent) error {
stream, err := c.client.StreamEvents(ctx, &pb.StreamEventsRequest{})
if err != nil {
return err
}
for {
resp, err := stream.Recv()
if err != nil {
return rpcErrToErr(err)
}
events <- docker.ContainerEvent{
ActorID: resp.Event.ActorId,
Name: resp.Event.Name,
Host: resp.Event.Host,
}
}
}
func (c *Client) StreamNewContainers(ctx context.Context, containers chan<- docker.Container) error {
stream, err := c.client.StreamContainerStarted(ctx, &pb.StreamContainerStartedRequest{})
if err != nil {
return err
}
for {
resp, err := stream.Recv()
if err != nil {
return rpcErrToErr(err)
}
started := resp.Container.Started.AsTime()
containers <- docker.Container{
ID: resp.Container.Id,
Name: resp.Container.Name,
Image: resp.Container.Image,
Labels: resp.Container.Labels,
Group: resp.Container.Group,
ImageID: resp.Container.ImageId,
Created: resp.Container.Created.AsTime(),
State: resp.Container.State,
Status: resp.Container.Status,
Health: resp.Container.Health,
Host: resp.Container.Host,
Tty: resp.Container.Tty,
StartedAt: &started,
Command: resp.Container.Command,
}
}
}
func (c *Client) FindContainer(containerID string) (docker.Container, error) {
response, err := c.client.FindContainer(context.Background(), &pb.FindContainerRequest{ContainerId: containerID})
if err != nil {
return docker.Container{}, err
}
var stats []docker.ContainerStat
for _, stat := range response.Container.Stats {
stats = append(stats, docker.ContainerStat{
ID: stat.Id,
CPUPercent: stat.CpuPercent,
MemoryPercent: stat.MemoryPercent,
MemoryUsage: stat.MemoryUsage,
})
}
var startedAt *time.Time
if response.Container.Started != nil {
started := response.Container.Started.AsTime()
startedAt = &started
}
return docker.Container{
ID: response.Container.Id,
Name: response.Container.Name,
Image: response.Container.Image,
Labels: response.Container.Labels,
Group: response.Container.Group,
ImageID: response.Container.ImageId,
Created: response.Container.Created.AsTime(),
State: response.Container.State,
Status: response.Container.Status,
Health: response.Container.Health,
Host: response.Container.Host,
Tty: response.Container.Tty,
Command: response.Container.Command,
Stats: utils.RingBufferFrom(300, stats),
StartedAt: startedAt,
}, nil
}
func (c *Client) ListContainers() ([]docker.Container, error) {
response, err := c.client.ListContainers(context.Background(), &pb.ListContainersRequest{})
if err != nil {
return nil, err
}
containers := make([]docker.Container, 0)
for _, container := range response.Containers {
var stats []docker.ContainerStat
for _, stat := range container.Stats {
stats = append(stats, docker.ContainerStat{
ID: stat.Id,
CPUPercent: stat.CpuPercent,
MemoryPercent: stat.MemoryPercent,
MemoryUsage: stat.MemoryUsage,
})
}
var startedAt *time.Time
if container.Started != nil {
started := container.Started.AsTime()
startedAt = &started
}
containers = append(containers, docker.Container{
ID: container.Id,
Name: container.Name,
Image: container.Image,
Labels: container.Labels,
Group: container.Group,
ImageID: container.ImageId,
Created: container.Created.AsTime(),
State: container.State,
Status: container.Status,
Health: container.Health,
Host: container.Host,
Tty: container.Tty,
Stats: utils.RingBufferFrom(300, stats),
Command: container.Command,
StartedAt: startedAt,
})
}
return containers, nil
}
func (c *Client) Host() docker.Host {
return c.host
}
func jsonBytesToOrderedMap(b []byte) *orderedmap.OrderedMap[string, any] {
var data *orderedmap.OrderedMap[string, any]
reader := bytes.NewReader(b)
json.NewDecoder(reader).Decode(&data)
return data
}


@@ -0,0 +1,196 @@
package agent
import (
"context"
"crypto/tls"
"io"
"net"
"os"
"path"
"testing"
"time"
"github.com/amir20/dozzle/internal/docker"
"github.com/amir20/dozzle/internal/utils"
"github.com/docker/docker/api/types/system"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"google.golang.org/grpc"
"google.golang.org/grpc/test/bufconn"
)
const bufSize = 1024 * 1024
var lis *bufconn.Listener
var certs tls.Certificate
var client *MockedClient
type MockedClient struct {
mock.Mock
docker.Client
}
func (m *MockedClient) FindContainer(id string) (docker.Container, error) {
args := m.Called(id)
return args.Get(0).(docker.Container), args.Error(1)
}
func (m *MockedClient) ContainerActions(action docker.ContainerAction, containerID string) error {
args := m.Called(action, containerID)
return args.Error(0)
}
func (m *MockedClient) ContainerEvents(ctx context.Context, events chan<- docker.ContainerEvent) error {
args := m.Called(ctx, events)
return args.Error(0)
}
func (m *MockedClient) ListContainers() ([]docker.Container, error) {
args := m.Called()
return args.Get(0).([]docker.Container), args.Error(1)
}
func (m *MockedClient) ContainerLogs(ctx context.Context, id string, since time.Time, stdType docker.StdType) (io.ReadCloser, error) {
args := m.Called(ctx, id, since, stdType)
return args.Get(0).(io.ReadCloser), args.Error(1)
}
func (m *MockedClient) ContainerStats(context.Context, string, chan<- docker.ContainerStat) error {
return nil
}
func (m *MockedClient) ContainerLogsBetweenDates(ctx context.Context, id string, from time.Time, to time.Time, stdType docker.StdType) (io.ReadCloser, error) {
args := m.Called(ctx, id, from, to, stdType)
return args.Get(0).(io.ReadCloser), args.Error(1)
}
func (m *MockedClient) Host() docker.Host {
args := m.Called()
return args.Get(0).(docker.Host)
}
func (m *MockedClient) IsSwarmMode() bool {
return false
}
func (m *MockedClient) SystemInfo() system.Info {
return system.Info{ID: "123"}
}
func init() {
lis = bufconn.Listen(bufSize)
cwd, err := os.Getwd()
if err != nil {
panic(err)
}
root := path.Join(cwd, "../../")
certs, err = tls.LoadX509KeyPair(path.Join(root, "shared_cert.pem"), path.Join(root, "shared_key.pem"))
if err != nil {
panic(err)
}
client = &MockedClient{}
client.On("ListContainers").Return([]docker.Container{
{
ID: "123456",
Name: "test",
Host: "localhost",
},
}, nil)
client.On("Host").Return(docker.Host{
ID: "localhost",
Endpoint: "local",
Name: "local",
})
client.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil).Run(func(args mock.Arguments) {
time.Sleep(5 * time.Second)
})
client.On("FindContainer", "123456").Return(docker.Container{
ID: "123456",
Name: "test",
Host: "localhost",
Image: "test",
ImageID: "test",
StartedAt: &time.Time{},
State: "running",
Status: "running",
Health: "healthy",
Group: "test",
Command: "test",
Created: time.Time{},
Tty: true,
Labels: map[string]string{
"test": "test",
},
Stats: utils.NewRingBuffer[docker.ContainerStat](300),
}, nil)
go RunServer(client, certs, lis)
}
func bufDialer(ctx context.Context, address string) (net.Conn, error) {
return lis.Dial()
}
func TestFindContainer(t *testing.T) {
rpc, err := NewClient("passthrough://bufnet", certs, grpc.WithContextDialer(bufDialer))
if err != nil {
t.Fatal(err)
}
container, _ := rpc.FindContainer("123456")
assert.Equal(t, container, docker.Container{
ID: "123456",
Name: "test",
Host: "localhost",
Image: "test",
ImageID: "test",
StartedAt: &time.Time{},
State: "running",
Status: "running",
Health: "healthy",
Group: "test",
Command: "test",
Created: time.Time{},
Tty: true,
Labels: map[string]string{
"test": "test",
},
Stats: utils.NewRingBuffer[docker.ContainerStat](300),
})
}
func TestListContainers(t *testing.T) {
rpc, err := NewClient("passthrough://bufnet", certs, grpc.WithContextDialer(bufDialer))
if err != nil {
t.Fatal(err)
}
containers, _ := rpc.ListContainers()
assert.Equal(t, containers, []docker.Container{
{
ID: "123456",
Name: "test",
Host: "localhost",
Image: "test",
ImageID: "test",
StartedAt: &time.Time{},
State: "running",
Status: "running",
Health: "healthy",
Group: "test",
Command: "test",
Created: time.Time{},
Tty: true,
Labels: map[string]string{
"test": "test",
},
Stats: utils.NewRingBuffer[docker.ContainerStat](300),
},
})
}

internal/agent/server.go (new file, 353 lines)

@@ -0,0 +1,353 @@
package agent
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"net"
"time"
"github.com/amir20/dozzle/internal/agent/pb"
"github.com/amir20/dozzle/internal/docker"
orderedmap "github.com/wk8/go-ordered-map/v2"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/grpc/status"
)
type server struct {
client docker.Client
store *docker.ContainerStore
pb.UnimplementedAgentServiceServer
}
func NewServer(client docker.Client) pb.AgentServiceServer {
return &server{
client: client,
store: docker.NewContainerStore(context.Background(), client),
}
}
func (s *server) StreamLogs(in *pb.StreamLogsRequest, out pb.AgentService_StreamLogsServer) error {
since := time.Time{}
if in.Since != nil {
since = in.Since.AsTime()
}
reader, err := s.client.ContainerLogs(out.Context(), in.ContainerId, since, docker.StdType(in.StreamTypes))
if err != nil {
return err
}
container, err := s.store.FindContainer(in.ContainerId)
if err != nil {
return err
}
g := docker.NewEventGenerator(reader, container)
for {
select {
case event := <-g.Events:
out.Send(&pb.StreamLogsResponse{
Event: logEventToPb(event),
})
case e := <-g.Errors:
return e
case <-out.Context().Done():
return nil
}
}
}
func (s *server) LogsBetweenDates(in *pb.LogsBetweenDatesRequest, out pb.AgentService_LogsBetweenDatesServer) error {
reader, err := s.client.ContainerLogsBetweenDates(out.Context(), in.ContainerId, in.Since.AsTime(), in.Until.AsTime(), docker.StdType(in.StreamTypes))
if err != nil {
return err
}
container, err := s.client.FindContainer(in.ContainerId)
if err != nil {
return err
}
g := docker.NewEventGenerator(reader, container)
for {
select {
case event := <-g.Events:
out.Send(&pb.StreamLogsResponse{
Event: logEventToPb(event),
})
case e := <-g.Errors:
return e
case <-out.Context().Done():
return nil
}
}
}
func (s *server) StreamRawBytes(in *pb.StreamRawBytesRequest, out pb.AgentService_StreamRawBytesServer) error {
reader, err := s.client.ContainerLogsBetweenDates(out.Context(), in.ContainerId, in.Since.AsTime(), in.Until.AsTime(), docker.StdType(in.StreamTypes))
if err != nil {
return err
}
buf := make([]byte, 1024)
for {
n, err := reader.Read(buf)
if err != nil {
return err
}
if n == 0 {
break
}
if err := out.Send(&pb.StreamRawBytesResponse{
Data: buf[:n],
}); err != nil {
return err
}
}
return nil
}
func (s *server) StreamEvents(in *pb.StreamEventsRequest, out pb.AgentService_StreamEventsServer) error {
events := make(chan docker.ContainerEvent)
s.store.SubscribeEvents(out.Context(), events)
for {
select {
case event := <-events:
out.Send(&pb.StreamEventsResponse{
Event: &pb.ContainerEvent{
ActorId: event.ActorID,
Name: event.Name,
Host: event.Host,
},
})
case <-out.Context().Done():
return nil
}
}
}
func (s *server) StreamStats(in *pb.StreamStatsRequest, out pb.AgentService_StreamStatsServer) error {
stats := make(chan docker.ContainerStat)
s.store.SubscribeStats(out.Context(), stats)
for {
select {
case stat := <-stats:
out.Send(&pb.StreamStatsResponse{
Stat: &pb.ContainerStat{
Id: stat.ID,
CpuPercent: stat.CPUPercent,
MemoryPercent: stat.MemoryPercent,
MemoryUsage: stat.MemoryUsage,
},
})
case <-out.Context().Done():
return nil
}
}
}
func (s *server) FindContainer(ctx context.Context, in *pb.FindContainerRequest) (*pb.FindContainerResponse, error) {
container, err := s.store.FindContainer(in.ContainerId)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
return &pb.FindContainerResponse{
Container: &pb.Container{
Id: container.ID,
Name: container.Name,
Image: container.Image,
ImageId: container.ImageID,
Command: container.Command,
Created: timestamppb.New(container.Created),
State: container.State,
Status: container.Status,
Health: container.Health,
Host: container.Host,
Tty: container.Tty,
Labels: container.Labels,
Group: container.Group,
Started: timestamppb.New(*container.StartedAt),
},
}, nil
}
func (s *server) ListContainers(ctx context.Context, in *pb.ListContainersRequest) (*pb.ListContainersResponse, error) {
containers, err := s.store.ListContainers()
if err != nil {
return nil, err
}
var pbContainers []*pb.Container
for _, container := range containers {
var pbStats []*pb.ContainerStat
for _, stat := range container.Stats.Data() {
pbStats = append(pbStats, &pb.ContainerStat{
Id: stat.ID,
CpuPercent: stat.CPUPercent,
MemoryPercent: stat.MemoryPercent,
MemoryUsage: stat.MemoryUsage,
})
}
var startedAt *timestamppb.Timestamp
if container.StartedAt != nil {
startedAt = timestamppb.New(*container.StartedAt)
}
pbContainers = append(pbContainers, &pb.Container{
Id: container.ID,
Name: container.Name,
Image: container.Image,
ImageId: container.ImageID,
Created: timestamppb.New(container.Created),
State: container.State,
Status: container.Status,
Health: container.Health,
Host: container.Host,
Tty: container.Tty,
Labels: container.Labels,
Group: container.Group,
Started: startedAt,
Stats: pbStats,
Command: container.Command,
})
}
return &pb.ListContainersResponse{
Containers: pbContainers,
}, nil
}
func (s *server) HostInfo(ctx context.Context, in *pb.HostInfoRequest) (*pb.HostInfoResponse, error) {
host := s.client.Host()
return &pb.HostInfoResponse{
Host: &pb.Host{
Id: host.ID,
Name: host.Name,
CpuCores: uint32(host.NCPU),
Memory: uint32(host.MemTotal),
},
}, nil
}
func (s *server) StreamContainerStarted(in *pb.StreamContainerStartedRequest, out pb.AgentService_StreamContainerStartedServer) error {
containers := make(chan docker.Container)
go s.store.SubscribeNewContainers(out.Context(), containers)
for {
select {
case container := <-containers:
out.Send(&pb.StreamContainerStartedResponse{
Container: &pb.Container{
Id: container.ID,
Name: container.Name,
Image: container.Image,
ImageId: container.ImageID,
Created: timestamppb.New(container.Created),
State: container.State,
Status: container.Status,
Health: container.Health,
Host: container.Host,
Tty: container.Tty,
Labels: container.Labels,
Group: container.Group,
Started: timestamppb.New(*container.StartedAt),
},
})
case <-out.Context().Done():
return nil
}
}
}
func RunServer(client docker.Client, certificates tls.Certificate, listener net.Listener) {
caCertPool := x509.NewCertPool()
c, err := x509.ParseCertificate(certificates.Certificate[0])
if err != nil {
log.Fatalf("failed to parse certificate: %v", err)
}
caCertPool.AddCert(c)
// Create the TLS configuration
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{certificates},
ClientCAs: caCertPool,
ClientAuth: tls.RequireAndVerifyClientCert, // Require client certificates
}
// Create the gRPC server with the credentials
creds := credentials.NewTLS(tlsConfig)
grpcServer := grpc.NewServer(grpc.Creds(creds))
pb.RegisterAgentServiceServer(grpcServer, NewServer(client))
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
log.Infof("gRPC server listening on %s", listener.Addr().String())
if err := grpcServer.Serve(listener); err != nil {
log.Fatalf("failed to serve: %v", err)
}
}
func logEventToPb(event *docker.LogEvent) *pb.LogEvent {
var message *anypb.Any
switch data := event.Message.(type) {
case string:
message, _ = anypb.New(&pb.SimpleMessage{
Message: data,
})
case *orderedmap.OrderedMap[string, any]:
message, _ = anypb.New(&pb.ComplexMessage{
Data: orderedMapToJSONBytes(data),
})
case *orderedmap.OrderedMap[string, string]:
message, _ = anypb.New(&pb.ComplexMessage{
Data: orderedMapToJSONBytes(data),
})
default:
log.Fatalf("agent server: unknown type %T", event.Message)
}
return &pb.LogEvent{
Message: message,
Timestamp: timestamppb.New(time.Unix(event.Timestamp, 0)),
Id: event.Id,
ContainerId: event.ContainerID,
Level: event.Level,
Stream: event.Stream,
Position: string(event.Position),
}
}
func orderedMapToJSONBytes[T any](data *orderedmap.OrderedMap[string, T]) []byte {
bytes := bytes.Buffer{}
json.NewEncoder(&bytes).Encode(data)
return bytes.Bytes()
}


@@ -15,4 +15,7 @@ type BeaconEvent struct {
IsSwarmMode bool `json:"isSwarmMode"`
ServerVersion string `json:"serverVersion"`
ServerID string `json:"serverID"`
Mode string `json:"mode"`
RemoteAgents int `json:"remoteAgents"`
RemoteClients int `json:"remoteClients"`
}


@@ -63,13 +63,13 @@ type DockerCLI interface {
type Client interface {
ListContainers() ([]Container, error)
FindContainer(string) (Container, error)
-ContainerLogs(context.Context, string, *time.Time, StdType) (io.ReadCloser, error)
+ContainerLogs(context.Context, string, time.Time, StdType) (io.ReadCloser, error)
-Events(context.Context, chan<- ContainerEvent) error
+ContainerEvents(context.Context, chan<- ContainerEvent) error
ContainerLogsBetweenDates(context.Context, string, time.Time, time.Time, StdType) (io.ReadCloser, error)
ContainerStats(context.Context, string, chan<- ContainerStat) error
Ping(context.Context) (types.Ping, error)
-Host() *Host
+Host() Host
-ContainerActions(action string, containerID string) error
+ContainerActions(action ContainerAction, containerID string) error
IsSwarmMode() bool
SystemInfo() system.Info
}
@@ -77,17 +77,18 @@ type Client interface {
type httpClient struct {
cli DockerCLI
filters filters.Args
-host *Host
+host Host
info system.Info
}
-func NewClient(cli DockerCLI, filters filters.Args, host *Host) Client {
+func NewClient(cli DockerCLI, filters filters.Args, host Host) Client {
client := &httpClient{
cli: cli,
filters: filters,
host: host,
}
if host.MemTotal == 0 || host.NCPU == 0 {
var err error var err error
client.info, err = cli.Info(context.Background()) client.info, err = cli.Info(context.Background())
if err != nil { if err != nil {
@@ -96,12 +97,13 @@ func NewClient(cli DockerCLI, filters filters.Args, host *Host) Client {
host.NCPU = client.info.NCPU host.NCPU = client.info.NCPU
host.MemTotal = client.info.MemTotal host.MemTotal = client.info.MemTotal
}
return client return client
} }
// NewClientWithFilters creates a new instance of Client with docker filters // NewClientWithFilters creates a new instance of Client with docker filters
func NewClientWithFilters(f map[string][]string) (Client, error) { func NewLocalClient(f map[string][]string, hostname string) (Client, error) {
filterArgs := filters.NewArgs() filterArgs := filters.NewArgs()
for key, values := range f { for key, values := range f {
for _, value := range values { for _, value := range values {
@@ -117,10 +119,27 @@ func NewClientWithFilters(f map[string][]string) (Client, error) {
return nil, err return nil, err
} }
return NewClient(cli, filterArgs, &Host{Name: "localhost", ID: "localhost"}), nil info, err := cli.Info(context.Background())
if err != nil {
return nil, err
} }
func NewClientWithTlsAndFilter(f map[string][]string, host Host) (Client, error) { host := Host{
ID: info.ID,
Name: info.Name,
MemTotal: info.MemTotal,
NCPU: info.NCPU,
Endpoint: "local",
}
if hostname != "" {
host.Name = hostname
}
return NewClient(cli, filterArgs, host), nil
}
func NewRemoteClient(f map[string][]string, host Host) (Client, error) {
filterArgs := filters.NewArgs() filterArgs := filters.NewArgs()
for key, values := range f { for key, values := range f {
for _, value := range values { for _, value := range values {
@@ -153,10 +172,11 @@ func NewClientWithTlsAndFilter(f map[string][]string, host Host) (Client, error)
return nil, err return nil, err
} }
return NewClient(cli, filterArgs, &host), nil return NewClient(cli, filterArgs, host), nil
} }
func (d *httpClient) FindContainer(id string) (Container, error) { func (d *httpClient) FindContainer(id string) (Container, error) {
log.Debugf("finding container with id: %s", id)
var container Container var container Container
containers, err := d.ListContainers() containers, err := d.ListContainers()
if err != nil { if err != nil {
@@ -188,13 +208,13 @@ func (d *httpClient) FindContainer(id string) (Container, error) {
return container, nil return container, nil
} }
func (d *httpClient) ContainerActions(action string, containerID string) error { func (d *httpClient) ContainerActions(action ContainerAction, containerID string) error {
switch action { switch action {
case "start": case Start:
return d.cli.ContainerStart(context.Background(), containerID, container.StartOptions{}) return d.cli.ContainerStart(context.Background(), containerID, container.StartOptions{})
case "stop": case Stop:
return d.cli.ContainerStop(context.Background(), containerID, container.StopOptions{}) return d.cli.ContainerStop(context.Background(), containerID, container.StopOptions{})
case "restart": case Restart:
return d.cli.ContainerRestart(context.Background(), containerID, container.StopOptions{}) return d.cli.ContainerRestart(context.Background(), containerID, container.StopOptions{})
default: default:
return fmt.Errorf("unknown action: %s", action) return fmt.Errorf("unknown action: %s", action)
@@ -299,14 +319,10 @@ func (d *httpClient) ContainerStats(ctx context.Context, id string, stats chan<-
} }
} }
func (d *httpClient) ContainerLogs(ctx context.Context, id string, since *time.Time, stdType StdType) (io.ReadCloser, error) { func (d *httpClient) ContainerLogs(ctx context.Context, id string, since time.Time, stdType StdType) (io.ReadCloser, error) {
log.WithField("id", id).WithField("since", since).WithField("stdType", stdType).Debug("streaming logs for container") log.WithField("id", id).WithField("since", since).WithField("stdType", stdType).Debug("streaming logs for container")
sinceQuery := "" sinceQuery := since.Add(time.Millisecond).Format(time.RFC3339Nano)
if since != nil {
sinceQuery = since.Add(time.Millisecond).Format(time.RFC3339Nano)
}
options := container.LogsOptions{ options := container.LogsOptions{
ShowStdout: stdType&STDOUT != 0, ShowStdout: stdType&STDOUT != 0,
ShowStderr: stdType&STDERR != 0, ShowStderr: stdType&STDERR != 0,
@@ -324,7 +340,7 @@ func (d *httpClient) ContainerLogs(ctx context.Context, id string, since *time.T
return reader, nil return reader, nil
} }
func (d *httpClient) Events(ctx context.Context, messages chan<- ContainerEvent) error { func (d *httpClient) ContainerEvents(ctx context.Context, messages chan<- ContainerEvent) error {
dockerMessages, err := d.cli.Events(ctx, events.ListOptions{}) dockerMessages, err := d.cli.Events(ctx, events.ListOptions{})
for { for {
@@ -344,7 +360,6 @@ func (d *httpClient) Events(ctx context.Context, messages chan<- ContainerEvent)
} }
} }
} }
} }
func (d *httpClient) ContainerLogsBetweenDates(ctx context.Context, id string, from time.Time, to time.Time, stdType StdType) (io.ReadCloser, error) { func (d *httpClient) ContainerLogsBetweenDates(ctx context.Context, id string, from time.Time, to time.Time, stdType StdType) (io.ReadCloser, error) {
@@ -370,7 +385,7 @@ func (d *httpClient) Ping(ctx context.Context) (types.Ping, error) {
return d.cli.Ping(ctx) return d.cli.Ping(ctx)
} }
func (d *httpClient) Host() *Host { func (d *httpClient) Host() Host {
return d.host return d.host
} }


@@ -65,7 +65,6 @@ func (m *mockedProxy) ContainerStart(ctx context.Context, containerID string, op
} }
func (m *mockedProxy) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { func (m *mockedProxy) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error {
args := m.Called(ctx, containerID, options) args := m.Called(ctx, containerID, options)
err := args.Get(0) err := args.Get(0)
@@ -91,7 +90,7 @@ func (m *mockedProxy) ContainerRestart(ctx context.Context, containerID string,
func Test_dockerClient_ListContainers_null(t *testing.T) { func Test_dockerClient_ListContainers_null(t *testing.T) {
proxy := new(mockedProxy) proxy := new(mockedProxy)
proxy.On("ContainerList", mock.Anything, mock.Anything).Return(nil, nil) proxy.On("ContainerList", mock.Anything, mock.Anything).Return(nil, nil)
client := &httpClient{proxy, filters.NewArgs(), &Host{ID: "localhost"}, system.Info{}} client := &httpClient{proxy, filters.NewArgs(), Host{ID: "localhost"}, system.Info{}}
list, err := client.ListContainers() list, err := client.ListContainers()
assert.Empty(t, list, "list should be empty") assert.Empty(t, list, "list should be empty")
@@ -103,7 +102,7 @@ func Test_dockerClient_ListContainers_null(t *testing.T) {
func Test_dockerClient_ListContainers_error(t *testing.T) { func Test_dockerClient_ListContainers_error(t *testing.T) {
proxy := new(mockedProxy) proxy := new(mockedProxy)
proxy.On("ContainerList", mock.Anything, mock.Anything).Return(nil, errors.New("test")) proxy.On("ContainerList", mock.Anything, mock.Anything).Return(nil, errors.New("test"))
client := &httpClient{proxy, filters.NewArgs(), &Host{ID: "localhost"}, system.Info{}} client := &httpClient{proxy, filters.NewArgs(), Host{ID: "localhost"}, system.Info{}}
list, err := client.ListContainers() list, err := client.ListContainers()
assert.Nil(t, list, "list should be nil") assert.Nil(t, list, "list should be nil")
@@ -126,7 +125,7 @@ func Test_dockerClient_ListContainers_happy(t *testing.T) {
proxy := new(mockedProxy) proxy := new(mockedProxy)
proxy.On("ContainerList", mock.Anything, mock.Anything).Return(containers, nil) proxy.On("ContainerList", mock.Anything, mock.Anything).Return(containers, nil)
client := &httpClient{proxy, filters.NewArgs(), &Host{ID: "localhost"}, system.Info{}} client := &httpClient{proxy, filters.NewArgs(), Host{ID: "localhost"}, system.Info{}}
list, err := client.ListContainers() list, err := client.ListContainers()
require.NoError(t, err, "error should not return an error.") require.NoError(t, err, "error should not return an error.")
@@ -160,8 +159,8 @@ func Test_dockerClient_ContainerLogs_happy(t *testing.T) {
Since: "2021-01-01T00:00:00.001Z"} Since: "2021-01-01T00:00:00.001Z"}
proxy.On("ContainerLogs", mock.Anything, id, options).Return(reader, nil) proxy.On("ContainerLogs", mock.Anything, id, options).Return(reader, nil)
client := &httpClient{proxy, filters.NewArgs(), &Host{ID: "localhost"}, system.Info{}} client := &httpClient{proxy, filters.NewArgs(), Host{ID: "localhost"}, system.Info{}}
logReader, _ := client.ContainerLogs(context.Background(), id, &since, STDALL) logReader, _ := client.ContainerLogs(context.Background(), id, since, STDALL)
actual, _ := io.ReadAll(logReader) actual, _ := io.ReadAll(logReader)
assert.Equal(t, string(b), string(actual), "message doesn't match expected") assert.Equal(t, string(b), string(actual), "message doesn't match expected")
@@ -174,9 +173,9 @@ func Test_dockerClient_ContainerLogs_error(t *testing.T) {
proxy.On("ContainerLogs", mock.Anything, id, mock.Anything).Return(nil, errors.New("test")) proxy.On("ContainerLogs", mock.Anything, id, mock.Anything).Return(nil, errors.New("test"))
client := &httpClient{proxy, filters.NewArgs(), &Host{ID: "localhost"}, system.Info{}} client := &httpClient{proxy, filters.NewArgs(), Host{ID: "localhost"}, system.Info{}}
reader, err := client.ContainerLogs(context.Background(), id, nil, STDALL) reader, err := client.ContainerLogs(context.Background(), id, time.Time{}, STDALL)
assert.Nil(t, reader, "reader should be nil") assert.Nil(t, reader, "reader should be nil")
assert.Error(t, err, "error should have been returned") assert.Error(t, err, "error should have been returned")
@@ -202,7 +201,7 @@ func Test_dockerClient_FindContainer_happy(t *testing.T) {
json := types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{State: state}, Config: &container.Config{Tty: false}} json := types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{State: state}, Config: &container.Config{Tty: false}}
proxy.On("ContainerInspect", mock.Anything, "abcdefghijkl").Return(json, nil) proxy.On("ContainerInspect", mock.Anything, "abcdefghijkl").Return(json, nil)
client := &httpClient{proxy, filters.NewArgs(), &Host{ID: "localhost"}, system.Info{}} client := &httpClient{proxy, filters.NewArgs(), Host{ID: "localhost"}, system.Info{}}
container, err := client.FindContainer("abcdefghijkl") container, err := client.FindContainer("abcdefghijkl")
require.NoError(t, err, "error should not be thrown") require.NoError(t, err, "error should not be thrown")
@@ -225,7 +224,7 @@ func Test_dockerClient_FindContainer_error(t *testing.T) {
proxy := new(mockedProxy) proxy := new(mockedProxy)
proxy.On("ContainerList", mock.Anything, mock.Anything).Return(containers, nil) proxy.On("ContainerList", mock.Anything, mock.Anything).Return(containers, nil)
client := &httpClient{proxy, filters.NewArgs(), &Host{ID: "localhost"}, system.Info{}} client := &httpClient{proxy, filters.NewArgs(), Host{ID: "localhost"}, system.Info{}}
_, err := client.FindContainer("not_valid") _, err := client.FindContainer("not_valid")
require.Error(t, err, "error should be thrown") require.Error(t, err, "error should be thrown")
@@ -246,7 +245,7 @@ func Test_dockerClient_ContainerActions_happy(t *testing.T) {
} }
proxy := new(mockedProxy) proxy := new(mockedProxy)
client := &httpClient{proxy, filters.NewArgs(), &Host{ID: "localhost"}, system.Info{}} client := &httpClient{proxy, filters.NewArgs(), Host{ID: "localhost"}, system.Info{}}
state := &types.ContainerState{Status: "running", StartedAt: time.Now().Format(time.RFC3339Nano)} state := &types.ContainerState{Status: "running", StartedAt: time.Now().Format(time.RFC3339Nano)}
json := types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{State: state}, Config: &container.Config{Tty: false}} json := types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{State: state}, Config: &container.Config{Tty: false}}
@@ -264,7 +263,7 @@ func Test_dockerClient_ContainerActions_happy(t *testing.T) {
actions := []string{"start", "stop", "restart"} actions := []string{"start", "stop", "restart"}
for _, action := range actions { for _, action := range actions {
err := client.ContainerActions(action, container.ID) err := client.ContainerActions(ContainerAction(action), container.ID)
require.NoError(t, err, "error should not be thrown") require.NoError(t, err, "error should not be thrown")
assert.Equal(t, err, nil) assert.Equal(t, err, nil)
} }
@@ -285,7 +284,7 @@ func Test_dockerClient_ContainerActions_error(t *testing.T) {
} }
proxy := new(mockedProxy) proxy := new(mockedProxy)
client := &httpClient{proxy, filters.NewArgs(), &Host{ID: "localhost"}, system.Info{}} client := &httpClient{proxy, filters.NewArgs(), Host{ID: "localhost"}, system.Info{}}
proxy.On("ContainerList", mock.Anything, mock.Anything).Return(containers, nil) proxy.On("ContainerList", mock.Anything, mock.Anything).Return(containers, nil)
proxy.On("ContainerStart", mock.Anything, mock.Anything, mock.Anything).Return(errors.New("test")) proxy.On("ContainerStart", mock.Anything, mock.Anything, mock.Anything).Return(errors.New("test"))
@@ -297,7 +296,7 @@ func Test_dockerClient_ContainerActions_error(t *testing.T) {
actions := []string{"start", "stop", "restart"} actions := []string{"start", "stop", "restart"}
for _, action := range actions { for _, action := range actions {
err := client.ContainerActions(action, container.ID) err := client.ContainerActions(ContainerAction(action), container.ID)
require.Error(t, err, "error should be thrown") require.Error(t, err, "error should be thrown")
assert.Error(t, err, "error should have been returned") assert.Error(t, err, "error should have been returned")
} }


@@ -7,13 +7,14 @@ import (
"sync/atomic" "sync/atomic"
"github.com/puzpuzpuz/xsync/v3" "github.com/puzpuzpuz/xsync/v3"
lop "github.com/samber/lo/parallel"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
type ContainerStore struct { type ContainerStore struct {
containers *xsync.MapOf[string, *Container] containers *xsync.MapOf[string, *Container]
subscribers *xsync.MapOf[context.Context, chan ContainerEvent] subscribers *xsync.MapOf[context.Context, chan<- ContainerEvent]
newContainerSubscribers *xsync.MapOf[context.Context, chan Container] newContainerSubscribers *xsync.MapOf[context.Context, chan<- Container]
client Client client Client
statsCollector *StatsCollector statsCollector *StatsCollector
wg sync.WaitGroup wg sync.WaitGroup
@@ -26,8 +27,8 @@ func NewContainerStore(ctx context.Context, client Client) *ContainerStore {
s := &ContainerStore{ s := &ContainerStore{
containers: xsync.NewMapOf[string, *Container](), containers: xsync.NewMapOf[string, *Container](),
client: client, client: client,
subscribers: xsync.NewMapOf[context.Context, chan ContainerEvent](), subscribers: xsync.NewMapOf[context.Context, chan<- ContainerEvent](),
newContainerSubscribers: xsync.NewMapOf[context.Context, chan Container](), newContainerSubscribers: xsync.NewMapOf[context.Context, chan<- Container](),
statsCollector: NewStatsCollector(client), statsCollector: NewStatsCollector(client),
wg: sync.WaitGroup{}, wg: sync.WaitGroup{},
events: make(chan ContainerEvent), events: make(chan ContainerEvent),
@@ -41,11 +42,13 @@ func NewContainerStore(ctx context.Context, client Client) *ContainerStore {
return s return s
} }
var ErrContainerNotFound = errors.New("container not found")
func (s *ContainerStore) checkConnectivity() error { func (s *ContainerStore) checkConnectivity() error {
if s.connected.CompareAndSwap(false, true) { if s.connected.CompareAndSwap(false, true) {
go func() { go func() {
log.Debugf("subscribing to docker events from container store %s", s.client.Host()) log.Debugf("subscribing to docker events from container store %s", s.client.Host())
err := s.client.Events(s.ctx, s.events) err := s.client.ContainerEvents(s.ctx, s.events)
if !errors.Is(err, context.Canceled) { if !errors.Is(err, context.Canceled) {
log.Errorf("docker store unexpectedly disconnected from docker events from %s with %v", s.client.Host(), err) log.Errorf("docker store unexpectedly disconnected from docker events from %s with %v", s.client.Host(), err)
} }
@@ -56,16 +59,17 @@ func (s *ContainerStore) checkConnectivity() error {
return err return err
} else { } else {
s.containers.Clear() s.containers.Clear()
for _, c := range containers { lop.ForEach(containers, func(c Container, _ int) {
s.containers.Store(c.ID, &c) container, _ := s.client.FindContainer(c.ID)
} s.containers.Store(c.ID, &container)
})
} }
} }
return nil return nil
} }
func (s *ContainerStore) List() ([]Container, error) { func (s *ContainerStore) ListContainers() ([]Container, error) {
s.wg.Wait() s.wg.Wait()
if err := s.checkConnectivity(); err != nil { if err := s.checkConnectivity(); err != nil {
@@ -80,11 +84,27 @@ func (s *ContainerStore) List() ([]Container, error) {
return containers, nil return containers, nil
} }
func (s *ContainerStore) FindContainer(id string) (Container, error) {
list, err := s.ListContainers()
if err != nil {
return Container{}, err
}
for _, c := range list {
if c.ID == id {
return c, nil
}
}
log.Warnf("container %s not found in store", id)
return Container{}, ErrContainerNotFound
}
func (s *ContainerStore) Client() Client { func (s *ContainerStore) Client() Client {
return s.client return s.client
} }
func (s *ContainerStore) Subscribe(ctx context.Context, events chan ContainerEvent) { func (s *ContainerStore) SubscribeEvents(ctx context.Context, events chan<- ContainerEvent) {
go func() { go func() {
if s.statsCollector.Start(s.ctx) { if s.statsCollector.Start(s.ctx) {
log.Debug("clearing container stats as stats collector has been stopped") log.Debug("clearing container stats as stats collector has been stopped")
@@ -96,19 +116,23 @@ func (s *ContainerStore) Subscribe(ctx context.Context, events chan ContainerEve
}() }()
s.subscribers.Store(ctx, events) s.subscribers.Store(ctx, events)
} go func() {
<-ctx.Done()
func (s *ContainerStore) Unsubscribe(ctx context.Context) {
s.subscribers.Delete(ctx) s.subscribers.Delete(ctx)
s.statsCollector.Stop() s.statsCollector.Stop()
}()
} }
func (s *ContainerStore) SubscribeStats(ctx context.Context, stats chan ContainerStat) { func (s *ContainerStore) SubscribeStats(ctx context.Context, stats chan<- ContainerStat) {
s.statsCollector.Subscribe(ctx, stats) s.statsCollector.Subscribe(ctx, stats)
} }
func (s *ContainerStore) SubscribeNewContainers(ctx context.Context, containers chan Container) { func (s *ContainerStore) SubscribeNewContainers(ctx context.Context, containers chan<- Container) {
s.newContainerSubscribers.Store(ctx, containers) s.newContainerSubscribers.Store(ctx, containers)
go func() {
<-ctx.Done()
s.newContainerSubscribers.Delete(ctx)
}()
} }
func (s *ContainerStore) init() { func (s *ContainerStore) init() {
@@ -128,11 +152,10 @@ func (s *ContainerStore) init() {
if container, err := s.client.FindContainer(event.ActorID); err == nil { if container, err := s.client.FindContainer(event.ActorID); err == nil {
log.Debugf("container %s started", container.ID) log.Debugf("container %s started", container.ID)
s.containers.Store(container.ID, &container) s.containers.Store(container.ID, &container)
s.newContainerSubscribers.Range(func(c context.Context, containers chan Container) bool { s.newContainerSubscribers.Range(func(c context.Context, containers chan<- Container) bool {
select { select {
case containers <- container: case containers <- container:
case <-c.Done(): case <-c.Done():
s.newContainerSubscribers.Delete(c)
} }
return true return true
}) })
@@ -167,7 +190,7 @@ func (s *ContainerStore) init() {
} }
}) })
} }
s.subscribers.Range(func(c context.Context, events chan ContainerEvent) bool { s.subscribers.Range(func(c context.Context, events chan<- ContainerEvent) bool {
select { select {
case events <- event: case events <- event:
case <-c.Done(): case <-c.Done():


@@ -24,7 +24,7 @@ func (m *mockedClient) FindContainer(id string) (Container, error) {
return args.Get(0).(Container), args.Error(1) return args.Get(0).(Container), args.Error(1)
} }
func (m *mockedClient) Events(ctx context.Context, events chan<- ContainerEvent) error { func (m *mockedClient) ContainerEvents(ctx context.Context, events chan<- ContainerEvent) error {
args := m.Called(ctx, events) args := m.Called(ctx, events)
return args.Error(0) return args.Error(0)
} }
@@ -34,9 +34,9 @@ func (m *mockedClient) ContainerStats(ctx context.Context, id string, stats chan
return args.Error(0) return args.Error(0)
} }
func (m *mockedClient) Host() *Host { func (m *mockedClient) Host() Host {
args := m.Called() args := m.Called()
return args.Get(0).(*Host) return args.Get(0).(Host)
} }
func TestContainerStore_List(t *testing.T) { func TestContainerStore_List(t *testing.T) {
@@ -48,18 +48,26 @@ func TestContainerStore_List(t *testing.T) {
Name: "test", Name: "test",
}, },
}, nil) }, nil)
client.On("Events", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil).Run(func(args mock.Arguments) { client.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil).Run(func(args mock.Arguments) {
ctx := args.Get(0).(context.Context) ctx := args.Get(0).(context.Context)
<-ctx.Done() <-ctx.Done()
}) })
client.On("Host").Return(&Host{ client.On("Host").Return(Host{
ID: "localhost", ID: "localhost",
}) })
client.On("FindContainer", "1234").Return(Container{
ID: "1234",
Name: "test",
Image: "test",
Stats: utils.NewRingBuffer[ContainerStat](300),
}, nil)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel) t.Cleanup(cancel)
store := NewContainerStore(ctx, client) store := NewContainerStore(ctx, client)
containers, _ := store.List() containers, _ := store.ListContainers()
assert.Equal(t, containers[0].ID, "1234") assert.Equal(t, containers[0].ID, "1234")
} }
@@ -75,7 +83,7 @@ func TestContainerStore_die(t *testing.T) {
}, },
}, nil) }, nil)
client.On("Events", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil). client.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil).
Run(func(args mock.Arguments) { Run(func(args mock.Arguments) {
ctx := args.Get(0).(context.Context) ctx := args.Get(0).(context.Context)
events := args.Get(1).(chan<- ContainerEvent) events := args.Get(1).(chan<- ContainerEvent)
@@ -86,21 +94,28 @@ func TestContainerStore_die(t *testing.T) {
} }
<-ctx.Done() <-ctx.Done()
}) })
client.On("Host").Return(&Host{ client.On("Host").Return(Host{
ID: "localhost", ID: "localhost",
}) })
client.On("ContainerStats", mock.Anything, "1234", mock.AnythingOfType("chan<- docker.ContainerStat")).Return(nil) client.On("ContainerStats", mock.Anything, "1234", mock.AnythingOfType("chan<- docker.ContainerStat")).Return(nil)
client.On("FindContainer", "1234").Return(Container{
ID: "1234",
Name: "test",
Image: "test",
Stats: utils.NewRingBuffer[ContainerStat](300),
}, nil)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel) t.Cleanup(cancel)
store := NewContainerStore(ctx, client) store := NewContainerStore(ctx, client)
// Wait until we get the event // Wait until we get the event
events := make(chan ContainerEvent) events := make(chan ContainerEvent)
store.Subscribe(ctx, events) store.SubscribeEvents(ctx, events)
<-events <-events
containers, _ := store.List() containers, _ := store.ListContainers()
assert.Equal(t, containers[0].State, "exited") assert.Equal(t, containers[0].State, "exited")
} }


@@ -197,24 +197,24 @@ func checkPosition(currentEvent *LogEvent, nextEvent *LogEvent) {
currentLevel := guessLogLevel(currentEvent) currentLevel := guessLogLevel(currentEvent)
if nextEvent != nil { if nextEvent != nil {
if currentEvent.IsCloseToTime(nextEvent) && currentLevel != "" && !nextEvent.HasLevel() { if currentEvent.IsCloseToTime(nextEvent) && currentLevel != "" && !nextEvent.HasLevel() {
currentEvent.Position = START currentEvent.Position = Beginning
nextEvent.Position = MIDDLE nextEvent.Position = Middle
} }
// If next item is not close to current item or has level, set current item position to end // If next item is not close to current item or has level, set current item position to end
if currentEvent.Position == MIDDLE && (nextEvent.HasLevel() || !currentEvent.IsCloseToTime(nextEvent)) { if currentEvent.Position == Middle && (nextEvent.HasLevel() || !currentEvent.IsCloseToTime(nextEvent)) {
currentEvent.Position = END currentEvent.Position = End
} }
// If next item is close to current item and has no level, set next item position to middle // If next item is close to current item and has no level, set next item position to middle
if currentEvent.Position == MIDDLE && !nextEvent.HasLevel() && currentEvent.IsCloseToTime(nextEvent) { if currentEvent.Position == Middle && !nextEvent.HasLevel() && currentEvent.IsCloseToTime(nextEvent) {
nextEvent.Position = MIDDLE nextEvent.Position = Middle
} }
// Set next item level to current item level // Set next item level to current item level
if currentEvent.Position == START || currentEvent.Position == MIDDLE { if currentEvent.Position == Beginning || currentEvent.Position == Middle {
nextEvent.Level = currentEvent.Level nextEvent.Level = currentEvent.Level
} }
} else if currentEvent.Position == MIDDLE { } else if currentEvent.Position == Middle {
currentEvent.Position = END currentEvent.Position = End
} }
} }


@@ -20,10 +20,11 @@ type Host struct {
ValidCerts bool `json:"-"` ValidCerts bool `json:"-"`
NCPU int `json:"nCPU"` NCPU int `json:"nCPU"`
MemTotal int64 `json:"memTotal"` MemTotal int64 `json:"memTotal"`
Endpoint string `json:"endpoint"`
} }
func (h *Host) String() string { func (h Host) String() string {
return h.ID return fmt.Sprintf("ID: %s, Endpoint: %s", h.ID, h.Endpoint)
} }
func ParseConnection(connection string) (Host, error) { func ParseConnection(connection string) (Host, error) {
@@ -72,6 +73,7 @@ func ParseConnection(connection string) (Host, error) {
CACertPath: cacertPath, CACertPath: cacertPath,
KeyPath: keyPath, KeyPath: keyPath,
ValidCerts: hasCerts, ValidCerts: hasCerts,
Endpoint: remoteUrl.String(),
}, nil }, nil
} }


@@ -14,7 +14,7 @@ import (
type StatsCollector struct { type StatsCollector struct {
stream chan ContainerStat stream chan ContainerStat
subscribers *xsync.MapOf[context.Context, chan ContainerStat] subscribers *xsync.MapOf[context.Context, chan<- ContainerStat]
client Client client Client
cancelers *xsync.MapOf[string, context.CancelFunc] cancelers *xsync.MapOf[string, context.CancelFunc]
stopper context.CancelFunc stopper context.CancelFunc
@@ -28,14 +28,18 @@ var timeToStop = 6 * time.Hour
func NewStatsCollector(client Client) *StatsCollector { func NewStatsCollector(client Client) *StatsCollector {
return &StatsCollector{ return &StatsCollector{
stream: make(chan ContainerStat), stream: make(chan ContainerStat),
subscribers: xsync.NewMapOf[context.Context, chan ContainerStat](), subscribers: xsync.NewMapOf[context.Context, chan<- ContainerStat](),
client: client, client: client,
cancelers: xsync.NewMapOf[string, context.CancelFunc](), cancelers: xsync.NewMapOf[string, context.CancelFunc](),
} }
} }
func (c *StatsCollector) Subscribe(ctx context.Context, stats chan ContainerStat) { func (c *StatsCollector) Subscribe(ctx context.Context, stats chan<- ContainerStat) {
c.subscribers.Store(ctx, stats) c.subscribers.Store(ctx, stats)
go func() {
<-ctx.Done()
c.subscribers.Delete(ctx)
}()
} }
func (c *StatsCollector) forceStop() { func (c *StatsCollector) forceStop() {
@@ -109,7 +113,7 @@ func (sc *StatsCollector) Start(parentCtx context.Context) bool {
go func() { go func() {
log.Debugf("subscribing to docker events from stats collector %s", sc.client.Host()) log.Debugf("subscribing to docker events from stats collector %s", sc.client.Host())
err := sc.client.Events(context.Background(), events) err := sc.client.ContainerEvents(context.Background(), events)
if !errors.Is(err, context.Canceled) { if !errors.Is(err, context.Canceled) {
log.Errorf("stats collector unexpectedly disconnected from docker events from %s with %v", sc.client.Host(), err) log.Errorf("stats collector unexpectedly disconnected from docker events from %s with %v", sc.client.Host(), err)
} }
@@ -136,7 +140,7 @@ func (sc *StatsCollector) Start(parentCtx context.Context) bool {
log.Info("stopped collecting container stats") log.Info("stopped collecting container stats")
return true return true
case stat := <-sc.stream: case stat := <-sc.stream:
sc.subscribers.Range(func(c context.Context, stats chan ContainerStat) bool { sc.subscribers.Range(func(c context.Context, stats chan<- ContainerStat) bool {
select { select {
case stats <- stat: case stats <- stat:
case <-c.Done(): case <-c.Done():


@@ -17,7 +17,7 @@ func startedCollector(ctx context.Context) *StatsCollector {
State: "running", State: "running",
}, },
}, nil) }, nil)
client.On("Events", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")). client.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).
Return(nil). Return(nil).
Run(func(args mock.Arguments) { Run(func(args mock.Arguments) {
ctx := args.Get(0).(context.Context) ctx := args.Get(0).(context.Context)
@@ -31,7 +31,7 @@ func startedCollector(ctx context.Context) *StatsCollector {
ID: "1234", ID: "1234",
} }
}) })
client.On("Host").Return(&Host{ client.On("Host").Return(Host{
ID: "localhost", ID: "localhost",
}) })


@@ -1,6 +1,7 @@
package docker package docker
import ( import (
"fmt"
"math" "math"
"time" "time"
@@ -45,11 +46,29 @@ type ContainerEvent struct {
type LogPosition string type LogPosition string
const ( const (
START LogPosition = "start" Beginning LogPosition = "start"
MIDDLE LogPosition = "middle" Middle LogPosition = "middle"
END LogPosition = "end" End LogPosition = "end"
) )
type ContainerAction string
const (
Start ContainerAction = "start"
Stop ContainerAction = "stop"
Restart ContainerAction = "restart"
)
func ParseContainerAction(input string) (ContainerAction, error) {
action := ContainerAction(input)
switch action {
case Start, Stop, Restart:
return action, nil
default:
return "", fmt.Errorf("unknown action: %s", input)
}
}
type LogEvent struct { type LogEvent struct {
Message any `json:"m,omitempty"` Message any `json:"m,omitempty"`
Timestamp int64 `json:"ts"` Timestamp int64 `json:"ts"`


@@ -0,0 +1,18 @@
package healthcheck
import (
"crypto/tls"
"github.com/amir20/dozzle/internal/agent"
log "github.com/sirupsen/logrus"
)
func RPCRequest(addr string, certs tls.Certificate) error {
client, err := agent.NewClient(addr, certs)
if err != nil {
log.Fatalf("Failed to create agent client: %v", err)
}
containers, err := client.ListContainers()
log.Tracef("Found %d containers.", len(containers))
return err
}


@@ -0,0 +1,28 @@
package cli
import (
"github.com/amir20/dozzle/internal/analytics"
"github.com/amir20/dozzle/internal/docker"
log "github.com/sirupsen/logrus"
)
func StartEvent(version string, mode string, agents []string, remoteClients []string, client docker.Client) {
event := analytics.BeaconEvent{
Name: "start",
Version: version,
Mode: mode,
RemoteAgents: len(agents),
RemoteClients: len(remoteClients),
}
if client != nil {
event.ServerID = client.SystemInfo().ID
event.ServerVersion = client.SystemInfo().ServerVersion
} else {
event.ServerID = "n/a"
}
if err := analytics.SendBeacon(event); err != nil {
log.Debug(err)
}
}


@@ -0,0 +1,20 @@
package cli
import (
"crypto/tls"
"embed"
)
func ReadCertificates(certs embed.FS) (tls.Certificate, error) {
cert, err := certs.ReadFile("shared_cert.pem")
if err != nil {
return tls.Certificate{}, err
}
key, err := certs.ReadFile("shared_key.pem")
if err != nil {
return tls.Certificate{}, err
}
return tls.X509KeyPair(cert, key)
}
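
A minimal sketch of the embedded filesystem ReadCertificates expects. The package path, variable name and embed location are assumptions; only the shared_cert.pem and shared_key.pem file names appear elsewhere in the commit.

package main

import (
	"crypto/tls"
	"embed"

	"github.com/amir20/dozzle/internal/cli"
	log "github.com/sirupsen/logrus"
)

//go:embed shared_cert.pem shared_key.pem
var certs embed.FS

func loadSharedCertificate() tls.Certificate {
	cert, err := cli.ReadCertificates(certs)
	if err != nil {
		log.Fatalf("failed to read shared certificates: %v", err)
	}
	return cert
}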


@@ -0,0 +1,17 @@
package cli
import (
log "github.com/sirupsen/logrus"
)
func ConfigureLogger(level string) {
if l, err := log.ParseLevel(level); err == nil {
log.SetLevel(l)
} else {
panic(err)
}
log.SetFormatter(&log.TextFormatter{
DisableLevelTruncation: true,
})
}


@@ -0,0 +1,32 @@
package cli
import (
"os"
"reflect"
"strings"
log "github.com/sirupsen/logrus"
)
func ValidateEnvVars(types ...interface{}) {
expectedEnvs := make(map[string]bool)
for _, t := range types {
typ := reflect.TypeOf(t)
for i := 0; i < typ.NumField(); i++ {
field := typ.Field(i)
for _, tag := range strings.Split(field.Tag.Get("arg"), ",") {
if strings.HasPrefix(tag, "env:") {
expectedEnvs[strings.TrimPrefix(tag, "env:")] = true
}
}
}
}
for _, env := range os.Environ() {
actual := strings.Split(env, "=")[0]
if strings.HasPrefix(actual, "DOZZLE_") && !expectedEnvs[actual] {
log.Warnf("Unexpected environment variable %s", actual)
}
}
}
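
Illustrative only: a struct carrying the `arg` tags that ValidateEnvVars scans. The field names and environment variables are invented for this sketch.

type exampleArgs struct {
	Level string `arg:"--level,env:DOZZLE_LEVEL"`
	Addr  string `arg:"--addr,env:DOZZLE_ADDR"`
}

// cli.ValidateEnvVars(exampleArgs{}) would then warn about any other
// DOZZLE_* variable present in the environment.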


@@ -0,0 +1,60 @@
package docker_support
import (
"context"
"io"
"time"
"github.com/amir20/dozzle/internal/agent"
"github.com/amir20/dozzle/internal/docker"
)
type agentService struct {
client *agent.Client
}
func NewAgentService(client *agent.Client) ClientService {
return &agentService{
client: client,
}
}
func (a *agentService) FindContainer(id string) (docker.Container, error) {
return a.client.FindContainer(id)
}
func (a *agentService) RawLogs(ctx context.Context, container docker.Container, from time.Time, to time.Time, stdTypes docker.StdType) (io.ReadCloser, error) {
return a.client.StreamRawBytes(ctx, container.ID, from, to, stdTypes)
}
func (a *agentService) LogsBetweenDates(ctx context.Context, container docker.Container, from time.Time, to time.Time, stdTypes docker.StdType) (<-chan *docker.LogEvent, error) {
return a.client.LogsBetweenDates(ctx, container.ID, from, to, stdTypes)
}
func (a *agentService) StreamLogs(ctx context.Context, container docker.Container, from time.Time, stdTypes docker.StdType, events chan<- *docker.LogEvent) error {
return a.client.StreamContainerLogs(ctx, container.ID, from, stdTypes, events)
}
func (a *agentService) ListContainers() ([]docker.Container, error) {
return a.client.ListContainers()
}
func (a *agentService) Host() docker.Host {
return a.client.Host()
}
func (a *agentService) SubscribeStats(ctx context.Context, stats chan<- docker.ContainerStat) {
go a.client.StreamStats(ctx, stats)
}
func (a *agentService) SubscribeEvents(ctx context.Context, events chan<- docker.ContainerEvent) {
go a.client.StreamEvents(ctx, events)
}
func (d *agentService) SubscribeContainersStarted(ctx context.Context, containers chan<- docker.Container) {
go d.client.StreamNewContainers(ctx, containers)
}
func (a *agentService) ContainerAction(container docker.Container, action docker.ContainerAction) error {
panic("not implemented")
}


@@ -0,0 +1,107 @@
package docker_support
import (
"context"
"io"
"time"
"github.com/amir20/dozzle/internal/docker"
)
type ClientService interface {
FindContainer(id string) (docker.Container, error)
ListContainers() ([]docker.Container, error)
Host() docker.Host
ContainerAction(container docker.Container, action docker.ContainerAction) error
LogsBetweenDates(ctx context.Context, container docker.Container, from time.Time, to time.Time, stdTypes docker.StdType) (<-chan *docker.LogEvent, error)
RawLogs(ctx context.Context, container docker.Container, from time.Time, to time.Time, stdTypes docker.StdType) (io.ReadCloser, error)
// Subscriptions
SubscribeStats(ctx context.Context, stats chan<- docker.ContainerStat)
SubscribeEvents(ctx context.Context, events chan<- docker.ContainerEvent)
SubscribeContainersStarted(ctx context.Context, containers chan<- docker.Container)
// Blocking streaming functions that should be used in a goroutine
StreamLogs(ctx context.Context, container docker.Container, from time.Time, stdTypes docker.StdType, events chan<- *docker.LogEvent) error
}
type dockerClientService struct {
client docker.Client
store *docker.ContainerStore
}
func NewDockerClientService(client docker.Client) ClientService {
return &dockerClientService{
client: client,
store: docker.NewContainerStore(context.Background(), client),
}
}
func (d *dockerClientService) RawLogs(ctx context.Context, container docker.Container, from time.Time, to time.Time, stdTypes docker.StdType) (io.ReadCloser, error) {
return d.client.ContainerLogsBetweenDates(ctx, container.ID, from, to, stdTypes)
}
func (d *dockerClientService) LogsBetweenDates(ctx context.Context, container docker.Container, from time.Time, to time.Time, stdTypes docker.StdType) (<-chan *docker.LogEvent, error) {
reader, err := d.client.ContainerLogsBetweenDates(ctx, container.ID, from, to, stdTypes)
if err != nil {
return nil, err
}
g := docker.NewEventGenerator(reader, container)
return g.Events, nil
}
func (d *dockerClientService) StreamLogs(ctx context.Context, container docker.Container, from time.Time, stdTypes docker.StdType, events chan<- *docker.LogEvent) error {
reader, err := d.client.ContainerLogs(ctx, container.ID, from, stdTypes)
if err != nil {
return err
}
g := docker.NewEventGenerator(reader, container)
for event := range g.Events {
events <- event
}
select {
case e := <-g.Errors:
return e
default:
return nil
}
}
func (d *dockerClientService) FindContainer(id string) (docker.Container, error) {
container, err := d.store.FindContainer(id)
if err != nil {
if err == docker.ErrContainerNotFound {
return d.client.FindContainer(id)
} else {
return docker.Container{}, err
}
}
return container, nil
}
func (d *dockerClientService) ContainerAction(container docker.Container, action docker.ContainerAction) error {
return d.client.ContainerActions(action, container.ID)
}
func (d *dockerClientService) ListContainers() ([]docker.Container, error) {
return d.store.ListContainers()
}
func (d *dockerClientService) Host() docker.Host {
return d.client.Host()
}
func (d *dockerClientService) SubscribeStats(ctx context.Context, stats chan<- docker.ContainerStat) {
d.store.SubscribeStats(ctx, stats)
}
func (d *dockerClientService) SubscribeEvents(ctx context.Context, events chan<- docker.ContainerEvent) {
d.store.SubscribeEvents(ctx, events)
}
func (d *dockerClientService) SubscribeContainersStarted(ctx context.Context, containers chan<- docker.Container) {
d.store.SubscribeNewContainers(ctx, containers)
}


@@ -0,0 +1,30 @@
package docker_support
import (
"context"
"io"
"time"
"github.com/amir20/dozzle/internal/docker"
)
type containerService struct {
clientService ClientService
Container docker.Container
}
func (c *containerService) RawLogs(ctx context.Context, from time.Time, to time.Time, stdTypes docker.StdType) (io.ReadCloser, error) {
return c.clientService.RawLogs(ctx, c.Container, from, to, stdTypes)
}
func (c *containerService) LogsBetweenDates(ctx context.Context, from time.Time, to time.Time, stdTypes docker.StdType) (<-chan *docker.LogEvent, error) {
return c.clientService.LogsBetweenDates(ctx, c.Container, from, to, stdTypes)
}
func (c *containerService) StreamLogs(ctx context.Context, from time.Time, stdTypes docker.StdType, events chan<- *docker.LogEvent) error {
return c.clientService.StreamLogs(ctx, c.Container, from, stdTypes, events)
}
func (c *containerService) Action(action docker.ContainerAction) error {
return c.clientService.ContainerAction(c.Container, action)
}


@@ -0,0 +1,215 @@
package docker_support
import (
"context"
"crypto/tls"
"fmt"
"net"
"github.com/amir20/dozzle/internal/agent"
"github.com/amir20/dozzle/internal/docker"
log "github.com/sirupsen/logrus"
"github.com/cenkalti/backoff/v4"
)
type ContainerFilter = func(*docker.Container) bool
type HostUnavailableError struct {
Host docker.Host
Err error
}
func (h *HostUnavailableError) Error() string {
return fmt.Sprintf("host %s unavailable: %v", h.Host.ID, h.Err)
}
type MultiHostService struct {
clients map[string]ClientService
SwarmMode bool
}
func NewMultiHostService(clients []ClientService) *MultiHostService {
m := &MultiHostService{
clients: make(map[string]ClientService),
}
for _, client := range clients {
if _, ok := m.clients[client.Host().ID]; ok {
log.Warnf("duplicate host %s found, skipping", client.Host())
continue
}
m.clients[client.Host().ID] = client
}
return m
}
func NewSwarmService(client docker.Client, certificates tls.Certificate) *MultiHostService {
m := &MultiHostService{
clients: make(map[string]ClientService),
SwarmMode: true,
}
localClient := NewDockerClientService(client)
m.clients[localClient.Host().ID] = localClient
discover := func() {
ips, err := net.LookupIP("tasks.dozzle")
if err != nil {
log.Fatalf("error looking up swarm services: %v", err)
}
found := 0
replaced := 0
for _, ip := range ips {
client, err := agent.NewClient(ip.String()+":7007", certificates)
if err != nil {
log.Warnf("error creating client for %s: %v", ip, err)
continue
}
if client.Host().ID == localClient.Host().ID {
continue
}
service := NewAgentService(client)
if existing, ok := m.clients[service.Host().ID]; !ok {
log.Debugf("adding swarm service %s", service.Host().ID)
m.clients[service.Host().ID] = service
found++
} else if existing.Host().Endpoint != service.Host().Endpoint {
log.Debugf("swarm service %s already exists with different endpoint %s and old value %s", service.Host().ID, service.Host().Endpoint, existing.Host().Endpoint)
delete(m.clients, existing.Host().ID)
m.clients[service.Host().ID] = service
replaced++
}
}
if found > 0 {
log.Infof("found %d new dozzle replicas", found)
}
if replaced > 0 {
log.Infof("replaced %d dozzle replicas", replaced)
}
}
go func() {
ticker := backoff.NewTicker(backoff.NewExponentialBackOff(
backoff.WithMaxElapsedTime(0)),
)
for range ticker.C {
log.Tracef("discovering swarm services")
discover()
}
}()
return m
}
func (m *MultiHostService) FindContainer(host string, id string) (*containerService, error) {
client, ok := m.clients[host]
if !ok {
return nil, fmt.Errorf("host %s not found", host)
}
container, err := client.FindContainer(id)
if err != nil {
return nil, err
}
return &containerService{
clientService: client,
Container: container,
}, nil
}
func (m *MultiHostService) ListContainersForHost(host string) ([]docker.Container, error) {
client, ok := m.clients[host]
if !ok {
return nil, fmt.Errorf("host %s not found", host)
}
return client.ListContainers()
}
func (m *MultiHostService) ListAllContainers() ([]docker.Container, []error) {
containers := make([]docker.Container, 0)
var errors []error
for _, client := range m.clients {
list, err := client.ListContainers()
if err != nil {
log.Debugf("error listing containers for host %s: %v", client.Host().ID, err)
errors = append(errors, &HostUnavailableError{Host: client.Host(), Err: err})
continue
}
containers = append(containers, list...)
}
return containers, errors
}
func (m *MultiHostService) ListAllContainersFiltered(filter ContainerFilter) ([]docker.Container, []error) {
containers, err := m.ListAllContainers()
filtered := make([]docker.Container, 0, len(containers))
for _, container := range containers {
if filter(&container) {
filtered = append(filtered, container)
}
}
return filtered, err
}
func (m *MultiHostService) SubscribeEventsAndStats(ctx context.Context, events chan<- docker.ContainerEvent, stats chan<- docker.ContainerStat) {
for _, client := range m.clients {
client.SubscribeEvents(ctx, events)
client.SubscribeStats(ctx, stats)
}
}
func (m *MultiHostService) SubscribeContainersStarted(ctx context.Context, containers chan<- docker.Container, filter ContainerFilter) {
newContainers := make(chan docker.Container)
for _, client := range m.clients {
client.SubscribeContainersStarted(ctx, newContainers)
}
go func() {
for container := range newContainers {
if filter(&container) {
select {
case containers <- container:
case <-ctx.Done():
return
}
}
}
}()
}
func (m *MultiHostService) TotalClients() int {
return len(m.clients)
}
func (m *MultiHostService) Hosts() []docker.Host {
hosts := make([]docker.Host, 0, len(m.clients))
for _, client := range m.clients {
hosts = append(hosts, client.Host())
}
return hosts
}
func (m *MultiHostService) LocalHost() (docker.Host, error) {
host := docker.Host{}
for _, host := range m.Hosts() {
if host.Endpoint == "local" {
return host, nil
}
}
return host, fmt.Errorf("local host not found")
}
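
A minimal sketch (imports elided; localClient, agentAddrs and certs are assumed names) of assembling the host list that the web handler consumes: one local Docker client plus any reachable remote agents. This is illustrative, not code from the commit.

func buildMultiHostService(localClient docker.Client, agentAddrs []string, certs tls.Certificate) *docker_support.MultiHostService {
	services := []docker_support.ClientService{
		docker_support.NewDockerClientService(localClient),
	}
	for _, addr := range agentAddrs { // e.g. "10.0.0.5:7007"
		client, err := agent.NewClient(addr, certs)
		if err != nil {
			log.Warnf("could not connect to agent %s: %v", addr, err)
			continue
		}
		services = append(services, docker_support.NewAgentService(client))
	}
	return docker_support.NewMultiHostService(services)
}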


@@ -20,6 +20,20 @@ func NewRingBuffer[T any](size int) *RingBuffer[T] {
} }
} }
func RingBufferFrom[T any](size int, data []T) *RingBuffer[T] {
if len(data) == 0 {
return NewRingBuffer[T](size)
}
if len(data) > size {
data = data[len(data)-size:]
}
return &RingBuffer[T]{
Size: size,
data: data,
start: 0,
}
}
func (r *RingBuffer[T]) Push(data T) { func (r *RingBuffer[T]) Push(data T) {
r.mutex.Lock() r.mutex.Lock()
defer r.mutex.Unlock() defer r.mutex.Unlock()


@@ -191,6 +191,9 @@ Content-Security-Policy: default-src 'self'; style-src 'self' 'unsafe-inline'; i
Content-Type: text/event-stream Content-Type: text/event-stream
X-Accel-Buffering: no X-Accel-Buffering: no
event: container-event
data: {"actorId":"123456","name":"container-stopped","host":"localhost"}
/* snapshot: Test_handler_streamLogs_happy_with_id */ /* snapshot: Test_handler_streamLogs_happy_with_id */
HTTP/1.1 200 OK HTTP/1.1 200 OK
Connection: close Connection: close

internal/web/actions.go (new file, 39 lines)

@@ -0,0 +1,39 @@
package web
import (
"net/http"
"github.com/amir20/dozzle/internal/docker"
"github.com/go-chi/chi/v5"
log "github.com/sirupsen/logrus"
)
func (h *handler) containerActions(w http.ResponseWriter, r *http.Request) {
action := chi.URLParam(r, "action")
id := chi.URLParam(r, "id")
log.Debugf("container action: %s, container id: %s", action, id)
containerService, err := h.multiHostService.FindContainer(hostKey(r), id)
if err != nil {
log.Errorf("error while trying to find container: %v", err)
http.Error(w, err.Error(), http.StatusNotFound)
return
}
parsedAction, err := docker.ParseContainerAction(action)
if err != nil {
log.Errorf("error while trying to parse action: %s", action)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if err := containerService.Action(parsedAction); err != nil {
log.Errorf("error while trying to perform action: %s", action)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Infof("container action performed: %s; container id: %s", action, id)
http.Error(w, "", http.StatusNoContent)
}


@@ -13,25 +13,26 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func get_mocked_client() *MockedClient { func mockedClient() *MockedClient {
mockedClient := new(MockedClient) mockedClient := new(MockedClient)
container := docker.Container{ID: "123"} container := docker.Container{ID: "123"}
mockedClient.On("FindContainer", "123").Return(container, nil) mockedClient.On("FindContainer", "123").Return(container, nil)
mockedClient.On("FindContainer", "456").Return(docker.Container{}, errors.New("container not found")) mockedClient.On("FindContainer", "456").Return(docker.Container{}, errors.New("container not found"))
mockedClient.On("ContainerActions", docker.Start, container.ID).Return(nil)
mockedClient.On("ContainerActions", "start", container.ID).Return(nil) mockedClient.On("ContainerActions", docker.Stop, container.ID).Return(nil)
mockedClient.On("ContainerActions", "stop", container.ID).Return(nil) mockedClient.On("ContainerActions", docker.Restart, container.ID).Return(nil)
mockedClient.On("ContainerActions", "restart", container.ID).Return(nil) mockedClient.On("ContainerActions", docker.Start, mock.Anything).Return(errors.New("container not found"))
mockedClient.On("ContainerActions", "something-else", container.ID).Return(errors.New("unknown action")) mockedClient.On("ContainerActions", docker.ContainerAction("something-else"), container.ID).Return(errors.New("unknown action"))
mockedClient.On("Host").Return(docker.Host{ID: "localhost"})
mockedClient.On("ContainerActions", "start", mock.Anything).Return(errors.New("container not found")) mockedClient.On("ListContainers").Return([]docker.Container{container}, nil)
mockedClient.On("ContainerEvents", mock.Anything, mock.Anything).Return(nil)
return mockedClient return mockedClient
} }
func Test_handler_containerActions_stop(t *testing.T) { func Test_handler_containerActions_stop(t *testing.T) {
mockedClient := get_mocked_client() mockedClient := mockedClient()
handler := createHandler(mockedClient, nil, Config{Base: "/", EnableActions: true, Authorization: Authorization{Provider: NONE}}) handler := createHandler(mockedClient, nil, Config{Base: "/", EnableActions: true, Authorization: Authorization{Provider: NONE}})
req, err := http.NewRequest("POST", "/api/hosts/localhost/containers/123/actions/stop", nil) req, err := http.NewRequest("POST", "/api/hosts/localhost/containers/123/actions/stop", nil)
@@ -39,11 +40,11 @@ func Test_handler_containerActions_stop(t *testing.T) {
rr := httptest.NewRecorder() rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req) handler.ServeHTTP(rr, req)
assert.Equal(t, 200, rr.Code) assert.Equal(t, 204, rr.Code)
} }
func Test_handler_containerActions_restart(t *testing.T) { func Test_handler_containerActions_restart(t *testing.T) {
mockedClient := get_mocked_client() mockedClient := mockedClient()
handler := createHandler(mockedClient, nil, Config{Base: "/", EnableActions: true, Authorization: Authorization{Provider: NONE}}) handler := createHandler(mockedClient, nil, Config{Base: "/", EnableActions: true, Authorization: Authorization{Provider: NONE}})
req, err := http.NewRequest("POST", "/api/hosts/localhost/containers/123/actions/restart", nil) req, err := http.NewRequest("POST", "/api/hosts/localhost/containers/123/actions/restart", nil)
@@ -51,11 +52,11 @@ func Test_handler_containerActions_restart(t *testing.T) {
rr := httptest.NewRecorder() rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req) handler.ServeHTTP(rr, req)
assert.Equal(t, 200, rr.Code) assert.Equal(t, 204, rr.Code)
} }
func Test_handler_containerActions_unknown_action(t *testing.T) { func Test_handler_containerActions_unknown_action(t *testing.T) {
mockedClient := get_mocked_client() mockedClient := mockedClient()
handler := createHandler(mockedClient, nil, Config{Base: "/", EnableActions: true, Authorization: Authorization{Provider: NONE}}) handler := createHandler(mockedClient, nil, Config{Base: "/", EnableActions: true, Authorization: Authorization{Provider: NONE}})
req, err := http.NewRequest("POST", "/api/hosts/localhost/containers/123/actions/something-else", nil) req, err := http.NewRequest("POST", "/api/hosts/localhost/containers/123/actions/something-else", nil)
@@ -63,11 +64,11 @@ func Test_handler_containerActions_unknown_action(t *testing.T) {
rr := httptest.NewRecorder() rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req) handler.ServeHTTP(rr, req)
assert.Equal(t, 500, rr.Code) assert.Equal(t, 400, rr.Code)
} }
func Test_handler_containerActions_unknown_container(t *testing.T) { func Test_handler_containerActions_unknown_container(t *testing.T) {
mockedClient := get_mocked_client() mockedClient := mockedClient()
handler := createHandler(mockedClient, nil, Config{Base: "/", EnableActions: true, Authorization: Authorization{Provider: NONE}}) handler := createHandler(mockedClient, nil, Config{Base: "/", EnableActions: true, Authorization: Authorization{Provider: NONE}})
req, err := http.NewRequest("POST", "/api/hosts/localhost/containers/456/actions/start", nil) req, err := http.NewRequest("POST", "/api/hosts/localhost/containers/456/actions/start", nil)
@@ -79,7 +80,7 @@ func Test_handler_containerActions_unknown_container(t *testing.T) {
} }
func Test_handler_containerActions_start(t *testing.T) { func Test_handler_containerActions_start(t *testing.T) {
mockedClient := get_mocked_client() mockedClient := mockedClient()
handler := createHandler(mockedClient, nil, Config{Base: "/", EnableActions: true, Authorization: Authorization{Provider: NONE}}) handler := createHandler(mockedClient, nil, Config{Base: "/", EnableActions: true, Authorization: Authorization{Provider: NONE}})
req, err := http.NewRequest("POST", "/api/hosts/localhost/containers/123/actions/start", nil) req, err := http.NewRequest("POST", "/api/hosts/localhost/containers/123/actions/start", nil)
@@ -87,5 +88,5 @@ func Test_handler_containerActions_start(t *testing.T) {
rr := httptest.NewRecorder() rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req) handler.ServeHTTP(rr, req)
assert.Equal(t, 200, rr.Code) assert.Equal(t, 204, rr.Code)
} }


@@ -1,40 +0,0 @@
package web
import (
"net/http"
"github.com/go-chi/chi/v5"
log "github.com/sirupsen/logrus"
)
func (h *handler) containerActions(w http.ResponseWriter, r *http.Request) {
action := chi.URLParam(r, "action")
id := chi.URLParam(r, "id")
log.Debugf("container action: %s, container id: %s", action, id)
client := h.clientFromRequest(r)
if client == nil {
log.Errorf("no client found for host %v", r.URL)
w.WriteHeader(http.StatusBadRequest)
return
}
container, err := client.FindContainer(id)
if err != nil {
log.Error(err)
w.WriteHeader(http.StatusNotFound)
return
}
err = client.ContainerActions(action, container.ID)
if err != nil {
log.Errorf("error while trying to perform action: %s", action)
w.WriteHeader(http.StatusInternalServerError)
return
}
log.Infof("container action performed: %s; container id: %s", action, id)
w.WriteHeader(http.StatusOK)
}


@@ -4,6 +4,7 @@ import (
"bytes" "bytes"
"compress/gzip" "compress/gzip"
"io" "io"
"time"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@@ -24,8 +25,17 @@ func Test_handler_download_logs(t *testing.T) {
data := makeMessage("INFO Testing logs...", docker.STDOUT) data := makeMessage("INFO Testing logs...", docker.STDOUT)
mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Tty: false}, nil) mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Tty: false}, nil).Once()
mockedClient.On("ContainerLogsBetweenDates", mock.Anything, id, mock.Anything, mock.Anything, docker.STDOUT).Return(io.NopCloser(bytes.NewReader(data)), nil) mockedClient.On("ContainerLogsBetweenDates", mock.Anything, id, mock.Anything, mock.Anything, docker.STDOUT).Return(io.NopCloser(bytes.NewReader(data)), nil)
mockedClient.On("Host").Return(docker.Host{
ID: "localhost",
})
mockedClient.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil).Run(func(args mock.Arguments) {
time.Sleep(1 * time.Second)
})
mockedClient.On("ListContainers").Return([]docker.Container{
{ID: id, Name: "test"},
}, nil)
handler := createDefaultHandler(mockedClient) handler := createDefaultHandler(mockedClient)
rr := httptest.NewRecorder() rr := httptest.NewRecorder()


@@ -8,6 +8,7 @@ import (
"github.com/amir20/dozzle/internal/analytics" "github.com/amir20/dozzle/internal/analytics"
"github.com/amir20/dozzle/internal/docker" "github.com/amir20/dozzle/internal/docker"
docker_support "github.com/amir20/dozzle/internal/support/docker"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@@ -27,29 +28,20 @@ func (h *handler) streamEvents(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() ctx := r.Context()
allContainers := make([]docker.Container, 0) allContainers, errors := h.multiHostService.ListAllContainers()
events := make(chan docker.ContainerEvent)
stats := make(chan docker.ContainerStat)
for _, store := range h.stores { for _, err := range errors {
if containers, err := store.List(); err == nil { log.Warnf("error listing containers: %v", err)
allContainers = append(allContainers, containers...) if hostNotAvailableError, ok := err.(*docker_support.HostUnavailableError); ok {
} else { if _, err := fmt.Fprintf(w, "event: host-unavailable\ndata: %s\n\n", hostNotAvailableError.Host.ID); err != nil {
log.Errorf("error listing containers: %v", err)
if _, err := fmt.Fprintf(w, "event: host-unavailable\ndata: %s\n\n", store.Client().Host().ID); err != nil {
log.Errorf("error writing event to event stream: %v", err) log.Errorf("error writing event to event stream: %v", err)
} }
} }
store.SubscribeStats(ctx, stats)
store.Subscribe(ctx, events)
} }
events := make(chan docker.ContainerEvent)
stats := make(chan docker.ContainerStat)
defer func() { h.multiHostService.SubscribeEventsAndStats(ctx, events, stats)
for _, store := range h.stores {
store.Unsubscribe(ctx)
}
}()
if err := sendContainersJSON(allContainers, w); err != nil { if err := sendContainersJSON(allContainers, w); err != nil {
log.Errorf("error writing containers to event stream: %v", err) log.Errorf("error writing containers to event stream: %v", err)
@@ -76,7 +68,7 @@ func (h *handler) streamEvents(w http.ResponseWriter, r *http.Request) {
case "start", "die": case "start", "die":
if event.Name == "start" { if event.Name == "start" {
log.Debugf("found new container with id: %v", event.ActorID) log.Debugf("found new container with id: %v", event.ActorID)
if containers, err := h.stores[event.Host].List(); err == nil { if containers, err := h.multiHostService.ListContainersForHost(event.Host); err == nil {
if err := sendContainersJSON(containers, w); err != nil { if err := sendContainersJSON(containers, w); err != nil {
log.Errorf("error encoding containers to stream: %v", err) log.Errorf("error encoding containers to stream: %v", err)
return return
@@ -118,32 +110,25 @@ func (h *handler) streamEvents(w http.ResponseWriter, r *http.Request) {
func sendBeaconEvent(h *handler, r *http.Request, runningContainers int) { func sendBeaconEvent(h *handler, r *http.Request, runningContainers int) {
b := analytics.BeaconEvent{ b := analytics.BeaconEvent{
Name: "events",
Version: h.config.Version,
Browser: r.Header.Get("User-Agent"),
AuthProvider: string(h.config.Authorization.Provider), AuthProvider: string(h.config.Authorization.Provider),
HasHostname: h.config.Hostname != "", Browser: r.Header.Get("User-Agent"),
HasCustomBase: h.config.Base != "/", Clients: h.multiHostService.TotalClients(),
HasCustomAddress: h.config.Addr != ":8080",
Clients: len(h.clients),
HasActions: h.config.EnableActions, HasActions: h.config.EnableActions,
HasCustomAddress: h.config.Addr != ":8080",
HasCustomBase: h.config.Base != "/",
HasHostname: h.config.Hostname != "",
Name: "events",
RunningContainers: runningContainers, RunningContainers: runningContainers,
Version: h.config.Version,
} }
for _, store := range h.stores { local, err := h.multiHostService.LocalHost()
if store.Client().IsSwarmMode() { if err == nil {
b.IsSwarmMode = true b.ServerID = local.ID
break
}
} }
if client, ok := h.clients["localhost"]; ok { if h.multiHostService.SwarmMode {
b.ServerID = client.SystemInfo().ID b.Mode = "swarm"
} else {
for _, client := range h.clients {
b.ServerID = client.SystemInfo().ID
break
}
} }
if !h.config.NoAnalytics { if !h.config.NoAnalytics {
@@ -151,7 +136,6 @@ func sendBeaconEvent(h *handler, r *http.Request, runningContainers int) {
log.Debugf("error sending beacon: %v", err) log.Debugf("error sending beacon: %v", err)
} }
} }
} }
func sendContainersJSON(containers []docker.Container, w http.ResponseWriter) error { func sendContainersJSON(containers []docker.Container, w http.ResponseWriter) error {

View File

@@ -9,6 +9,7 @@ import (
"testing" "testing"
"github.com/amir20/dozzle/internal/docker" "github.com/amir20/dozzle/internal/docker"
docker_support "github.com/amir20/dozzle/internal/support/docker"
"github.com/amir20/dozzle/internal/utils" "github.com/amir20/dozzle/internal/utils"
"github.com/beme/abide" "github.com/beme/abide"
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/mock"
@@ -23,7 +24,7 @@ func Test_handler_streamEvents_happy(t *testing.T) {
mockedClient := new(MockedClient) mockedClient := new(MockedClient)
mockedClient.On("ListContainers").Return([]docker.Container{}, nil) mockedClient.On("ListContainers").Return([]docker.Container{}, nil)
mockedClient.On("Events", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil).Run(func(args mock.Arguments) { mockedClient.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil).Run(func(args mock.Arguments) {
messages := args.Get(1).(chan<- docker.ContainerEvent) messages := args.Get(1).(chan<- docker.ContainerEvent)
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
@@ -47,16 +48,15 @@ func Test_handler_streamEvents_happy(t *testing.T) {
Stats: utils.NewRingBuffer[docker.ContainerStat](300), // 300 seconds of stats Stats: utils.NewRingBuffer[docker.ContainerStat](300), // 300 seconds of stats
}, nil) }, nil)
mockedClient.On("Host").Return(&docker.Host{ mockedClient.On("Host").Return(docker.Host{
ID: "localhost", ID: "localhost",
}) })
clients := map[string]docker.Client{
"localhost": mockedClient,
}
// This is needed so that the server is initialized for store // This is needed so that the server is initialized for store
server := CreateServer(clients, nil, Config{Base: "/", Authorization: Authorization{Provider: NONE}}) multiHostService := docker_support.NewMultiHostService(
[]docker_support.ClientService{docker_support.NewDockerClientService(mockedClient)},
)
server := CreateServer(multiHostService, nil, Config{Base: "/", Authorization: Authorization{Provider: NONE}})
handler := server.Handler handler := server.Handler
rr := httptest.NewRecorder() rr := httptest.NewRecorder()

View File

@@ -1,25 +1,19 @@
package web package web
import ( import (
"fmt"
"net/http" "net/http"
"github.com/amir20/dozzle/internal/docker"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
func (h *handler) healthcheck(w http.ResponseWriter, r *http.Request) { func (h *handler) healthcheck(w http.ResponseWriter, r *http.Request) {
log.Trace("Executing healthcheck request") log.Trace("Executing healthcheck request")
var client docker.Client
for _, v := range h.clients {
client = v
break
}
if ping, err := client.Ping(r.Context()); err != nil { _, errors := h.multiHostService.ListAllContainers()
log.Error(err) if len(errors) > 0 {
http.Error(w, err.Error(), http.StatusInternalServerError) log.Error(errors)
http.Error(w, "Error listing containers", http.StatusInternalServerError)
} else { } else {
fmt.Fprintf(w, "OK API Version %v", ping.APIVersion) http.Error(w, "OK", http.StatusOK)
} }
} }

View File

@@ -14,7 +14,6 @@ import (
"path" "path"
"github.com/amir20/dozzle/internal/auth" "github.com/amir20/dozzle/internal/auth"
"github.com/amir20/dozzle/internal/docker"
"github.com/amir20/dozzle/internal/profile" "github.com/amir20/dozzle/internal/profile"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@@ -43,10 +42,7 @@ func (h *handler) executeTemplate(w http.ResponseWriter, req *http.Request) {
if h.config.Base != "/" { if h.config.Base != "/" {
base = h.config.Base base = h.config.Base
} }
hosts := make([]*docker.Host, 0, len(h.clients)) hosts := h.multiHostService.Hosts()
for _, v := range h.clients {
hosts = append(hosts, v.Host())
}
sort.Slice(hosts, func(i, j int) bool { sort.Slice(hosts, func(i, j int) bool {
return hosts[i].Name < hosts[j].Name return hosts[i].Name < hosts[j].Name
}) })

View File

@@ -3,6 +3,7 @@ package web
import ( import (
"compress/gzip" "compress/gzip"
"context" "context"
"errors"
"strings" "strings"
"github.com/goccy/go-json" "github.com/goccy/go-json"
@@ -24,7 +25,7 @@ import (
func (h *handler) downloadLogs(w http.ResponseWriter, r *http.Request) { func (h *handler) downloadLogs(w http.ResponseWriter, r *http.Request) {
id := chi.URLParam(r, "id") id := chi.URLParam(r, "id")
container, err := h.clientFromRequest(r).FindContainer(id) containerService, err := h.multiHostService.FindContainer(hostKey(r), id)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return
@@ -33,7 +34,7 @@ func (h *handler) downloadLogs(w http.ResponseWriter, r *http.Request) {
now := time.Now() now := time.Now()
nowFmt := now.Format("2006-01-02T15-04-05") nowFmt := now.Format("2006-01-02T15-04-05")
contentDisposition := fmt.Sprintf("attachment; filename=%s-%s.log", container.Name, nowFmt) contentDisposition := fmt.Sprintf("attachment; filename=%s-%s.log", containerService.Container.Name, nowFmt)
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Disposition", contentDisposition) w.Header().Set("Content-Disposition", contentDisposition)
@@ -59,16 +60,16 @@ func (h *handler) downloadLogs(w http.ResponseWriter, r *http.Request) {
zw := gzip.NewWriter(w) zw := gzip.NewWriter(w)
defer zw.Close() defer zw.Close()
zw.Name = fmt.Sprintf("%s-%s.log", container.Name, nowFmt) zw.Name = fmt.Sprintf("%s-%s.log", containerService.Container.Name, nowFmt)
zw.Comment = "Logs generated by Dozzle" zw.Comment = "Logs generated by Dozzle"
zw.ModTime = now zw.ModTime = now
reader, err := h.clientFromRequest(r).ContainerLogsBetweenDates(r.Context(), id, time.Time{}, now, stdTypes) reader, err := containerService.RawLogs(r.Context(), time.Time{}, now, stdTypes)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
} }
if container.Tty { if containerService.Container.Tty {
io.Copy(zw, reader) io.Copy(zw, reader)
} else { } else {
stdcopy.StdCopy(zw, zw, reader) stdcopy.StdCopy(zw, zw, reader)
@@ -95,69 +96,31 @@ func (h *handler) fetchLogsBetweenDates(w http.ResponseWriter, r *http.Request)
return return
} }
container, err := h.clientFromRequest(r).FindContainer(id) containerService, err := h.multiHostService.FindContainer(hostKey(r), id)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusNotFound) http.Error(w, err.Error(), http.StatusNotFound)
return return
} }
reader, err := h.clientFromRequest(r).ContainerLogsBetweenDates(r.Context(), container.ID, from, to, stdTypes) events, err := containerService.LogsBetweenDates(r.Context(), from, to, stdTypes)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) log.Errorf("error while streaming logs %v", err.Error())
return
} }
g := docker.NewEventGenerator(reader, container)
encoder := json.NewEncoder(w) encoder := json.NewEncoder(w)
for event := range events {
for event := range g.Events {
if err := encoder.Encode(event); err != nil { if err := encoder.Encode(event); err != nil {
log.Errorf("json encoding error while streaming %v", err.Error()) log.Errorf("json encoding error while streaming %v", err.Error())
} }
} }
} }
func (h *handler) newContainers(ctx context.Context) chan docker.Container {
containers := make(chan docker.Container)
for _, store := range h.stores {
store.SubscribeNewContainers(ctx, containers)
}
return containers
}
func (h *handler) streamContainerLogs(w http.ResponseWriter, r *http.Request) { func (h *handler) streamContainerLogs(w http.ResponseWriter, r *http.Request) {
id := chi.URLParam(r, "id") id := chi.URLParam(r, "id")
container, err := h.clientFromRequest(r).FindContainer(id)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
containers := make(chan docker.Container, 1) streamLogsForContainers(w, r, h.multiHostService, func(container *docker.Container) bool {
containers <- container return container.ID == id && container.Host == hostKey(r)
})
go func() {
newContainers := h.newContainers(r.Context())
for {
select {
case container := <-newContainers:
if container.ID == id {
select {
case containers <- container:
case <-r.Context().Done():
log.Debugf("closing container channel streamContainerLogs")
return
}
}
case <-r.Context().Done():
log.Debugf("closing container channel streamContainerLogs")
return
}
}
}()
streamLogsForContainers(w, r, h.clients, containers)
} }
func (h *handler) streamLogsMerged(w http.ResponseWriter, r *http.Request) { func (h *handler) streamLogsMerged(w http.ResponseWriter, r *http.Request) {
@@ -166,157 +129,40 @@ func (h *handler) streamLogsMerged(w http.ResponseWriter, r *http.Request) {
return return
} }
containers := make(chan docker.Container, len(r.URL.Query()["id"])) ids := make(map[string]bool)
for _, id := range r.URL.Query()["id"] { for _, id := range r.URL.Query()["id"] {
container, err := h.clientFromRequest(r).FindContainer(id) ids[id] = true
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
containers <- container
} }
streamLogsForContainers(w, r, h.clients, containers) streamLogsForContainers(w, r, h.multiHostService, func(container *docker.Container) bool {
return ids[container.ID] && container.Host == hostKey(r)
})
} }
func (h *handler) streamServiceLogs(w http.ResponseWriter, r *http.Request) { func (h *handler) streamServiceLogs(w http.ResponseWriter, r *http.Request) {
service := chi.URLParam(r, "service") service := chi.URLParam(r, "service")
containers := make(chan docker.Container, 10) streamLogsForContainers(w, r, h.multiHostService, func(container *docker.Container) bool {
return container.State == "running" && container.Labels["com.docker.swarm.service.name"] == service
go func() { })
for _, store := range h.stores {
list, err := store.List()
if err != nil {
log.Errorf("error while listing containers %v", err.Error())
return
}
for _, container := range list {
if container.State == "running" && (container.Labels["com.docker.swarm.service.name"] == service) {
select {
case containers <- container:
case <-r.Context().Done():
log.Debugf("closing container channel streamServiceLogs")
return
}
}
}
}
newContainers := h.newContainers(r.Context())
for {
select {
case container := <-newContainers:
if container.State == "running" && (container.Labels["com.docker.swarm.service.name"] == service) {
select {
case containers <- container:
case <-r.Context().Done():
log.Debugf("closing container channel streamServiceLogs")
return
}
}
case <-r.Context().Done():
log.Debugf("closing container channel streamServiceLogs")
return
}
}
}()
streamLogsForContainers(w, r, h.clients, containers)
} }
func (h *handler) streamGroupedLogs(w http.ResponseWriter, r *http.Request) { func (h *handler) streamGroupedLogs(w http.ResponseWriter, r *http.Request) {
group := chi.URLParam(r, "group") group := chi.URLParam(r, "group")
containers := make(chan docker.Container, 10)
go func() { streamLogsForContainers(w, r, h.multiHostService, func(container *docker.Container) bool {
for _, store := range h.stores { return container.State == "running" && container.Group == group
list, err := store.List() })
if err != nil {
log.Errorf("error while listing containers %v", err.Error())
return
}
for _, container := range list {
if container.State == "running" && (container.Group == group) {
select {
case containers <- container:
case <-r.Context().Done():
log.Debugf("closing container channel streamServiceLogs")
return
}
}
}
}
newContainers := h.newContainers(r.Context())
for {
select {
case container := <-newContainers:
if container.State == "running" && (container.Group == group) {
select {
case containers <- container:
case <-r.Context().Done():
log.Debugf("closing container channel streamServiceLogs")
return
}
}
case <-r.Context().Done():
log.Debugf("closing container channel streamServiceLogs")
return
}
}
}()
streamLogsForContainers(w, r, h.clients, containers)
} }
func (h *handler) streamStackLogs(w http.ResponseWriter, r *http.Request) { func (h *handler) streamStackLogs(w http.ResponseWriter, r *http.Request) {
stack := chi.URLParam(r, "stack") stack := chi.URLParam(r, "stack")
containers := make(chan docker.Container, 10)
go func() { streamLogsForContainers(w, r, h.multiHostService, func(container *docker.Container) bool {
for _, store := range h.stores { return container.State == "running" && container.Labels["com.docker.stack.namespace"] == stack
list, err := store.List() })
if err != nil {
log.Errorf("error while listing containers %v", err.Error())
return
} }
for _, container := range list { func streamLogsForContainers(w http.ResponseWriter, r *http.Request, multiHostClient *MultiHostService, filter ContainerFilter) {
if container.State == "running" && (container.Labels["com.docker.stack.namespace"] == stack) {
select {
case containers <- container:
case <-r.Context().Done():
log.Debugf("closing container channel streamStackLogs")
return
}
}
}
}
newContainers := h.newContainers(r.Context())
for {
select {
case container := <-newContainers:
if container.State == "running" && (container.Labels["com.docker.stack.namespace"] == stack) {
select {
case containers <- container:
case <-r.Context().Done():
log.Debugf("closing container channel streamStackLogs")
return
}
}
case <-r.Context().Done():
log.Debugf("closing container channel streamStackLogs")
return
}
}
}()
streamLogsForContainers(w, r, h.clients, containers)
}
func streamLogsForContainers(w http.ResponseWriter, r *http.Request, clients map[string]docker.Client, containers chan docker.Container) {
var stdTypes docker.StdType var stdTypes docker.StdType
if r.URL.Query().Has("stdout") { if r.URL.Query().Has("stdout") {
stdTypes |= docker.STDOUT stdTypes |= docker.STDOUT
@@ -347,8 +193,38 @@ func streamLogsForContainers(w http.ResponseWriter, r *http.Request, clients map
ticker := time.NewTicker(5 * time.Second) ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop() defer ticker.Stop()
existingContainers, errs := multiHostClient.ListAllContainersFiltered(filter)
if len(errs) > 0 {
log.Warnf("error while listing containers %v", errs)
}
started := time.Now() streamLogs := func(container docker.Container) {
start := time.Time{}
if container.StartedAt != nil {
start = *container.StartedAt
}
containerService, err := multiHostClient.FindContainer(container.Host, container.ID)
if err != nil {
log.Errorf("error while finding container %v", err.Error())
return
}
err = containerService.StreamLogs(r.Context(), start, stdTypes, logs)
if err != nil {
if errors.Is(err, io.EOF) {
log.WithError(err).Debugf("stream closed for container %v", container.Name)
events <- &docker.ContainerEvent{ActorID: container.ID, Name: "container-stopped", Host: container.Host}
} else if !errors.Is(err, context.Canceled) {
log.Errorf("unknown error while streaming %v", err.Error())
}
}
}
for _, container := range existingContainers {
go streamLogs(container)
}
newContainers := make(chan docker.Container)
multiHostClient.SubscribeContainersStarted(r.Context(), newContainers, filter)
loop: loop:
for { for {
@@ -367,33 +243,9 @@ loop:
case <-ticker.C: case <-ticker.C:
fmt.Fprintf(w, ":ping \n\n") fmt.Fprintf(w, ":ping \n\n")
f.Flush() f.Flush()
case container := <-containers: case container := <-newContainers:
if container.StartedAt != nil && container.StartedAt.After(started) {
events <- &docker.ContainerEvent{ActorID: container.ID, Name: "container-started", Host: container.Host} events <- &docker.ContainerEvent{ActorID: container.ID, Name: "container-started", Host: container.Host}
} go streamLogs(container)
go func(container docker.Container) {
reader, err := clients[container.Host].ContainerLogs(r.Context(), container.ID, container.StartedAt, stdTypes)
if err != nil {
return
}
g := docker.NewEventGenerator(reader, container)
for event := range g.Events {
logs <- event
}
select {
case err := <-g.Errors:
if err != nil {
if err == io.EOF {
log.WithError(err).Debugf("stream closed for container %v", container.Name)
events <- &docker.ContainerEvent{ActorID: container.ID, Name: "container-stopped", Host: container.Host}
} else if err != r.Context().Err() {
log.Errorf("unknown error while streaming %v", err.Error())
}
}
default:
// do nothing
}
}(container)
case event := <-events: case event := <-events:
log.Debugf("received container event %v", event) log.Debugf("received container event %v", event)

View File

@@ -39,13 +39,22 @@ func Test_handler_streamLogs_happy(t *testing.T) {
now := time.Now() now := time.Now()
mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Tty: false, Host: "localhost", StartedAt: &now}, nil) mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Tty: false, Host: "localhost", StartedAt: &now}, nil)
mockedClient.On("ContainerLogs", mock.Anything, mock.Anything, &now, docker.STDALL).Return(io.NopCloser(bytes.NewReader(data)), nil). mockedClient.On("ContainerLogs", mock.Anything, mock.Anything, now, docker.STDALL).Return(io.NopCloser(bytes.NewReader(data)), nil).
Run(func(args mock.Arguments) { Run(func(args mock.Arguments) {
go func() { go func() {
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
cancel() cancel()
}() }()
}) })
mockedClient.On("Host").Return(docker.Host{
ID: "localhost",
})
mockedClient.On("ListContainers").Return([]docker.Container{
{ID: id, Name: "test", Host: "localhost"},
}, nil)
mockedClient.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil).Run(func(args mock.Arguments) {
time.Sleep(50 * time.Millisecond)
})
handler := createDefaultHandler(mockedClient) handler := createDefaultHandler(mockedClient)
rr := httptest.NewRecorder() rr := httptest.NewRecorder()
@@ -72,13 +81,24 @@ func Test_handler_streamLogs_happy_with_id(t *testing.T) {
started := time.Date(2020, time.May, 13, 18, 55, 37, 772853839, time.UTC) started := time.Date(2020, time.May, 13, 18, 55, 37, 772853839, time.UTC)
mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Host: "localhost", StartedAt: &started}, nil) mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Host: "localhost", StartedAt: &started}, nil)
mockedClient.On("ContainerLogs", mock.Anything, mock.Anything, &started, docker.STDALL).Return(io.NopCloser(bytes.NewReader(data)), nil). mockedClient.On("ContainerLogs", mock.Anything, mock.Anything, started, docker.STDALL).Return(io.NopCloser(bytes.NewReader(data)), nil).
Run(func(args mock.Arguments) { Run(func(args mock.Arguments) {
go func() { go func() {
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
cancel() cancel()
}() }()
}) })
mockedClient.On("Host").Return(docker.Host{
ID: "localhost",
})
mockedClient.On("ListContainers").Return([]docker.Container{
{ID: id, Name: "test", Host: "localhost"},
}, nil)
mockedClient.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil).Run(func(args mock.Arguments) {
time.Sleep(50 * time.Millisecond)
})
handler := createDefaultHandler(mockedClient) handler := createDefaultHandler(mockedClient)
rr := httptest.NewRecorder() rr := httptest.NewRecorder()
@@ -101,13 +121,20 @@ func Test_handler_streamLogs_happy_container_stopped(t *testing.T) {
started := time.Date(2020, time.May, 13, 18, 55, 37, 772853839, time.UTC) started := time.Date(2020, time.May, 13, 18, 55, 37, 772853839, time.UTC)
mockedClient := new(MockedClient) mockedClient := new(MockedClient)
mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Host: "localhost", StartedAt: &started}, nil) mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Host: "localhost", StartedAt: &started}, nil)
mockedClient.On("ContainerLogs", mock.Anything, id, &started, docker.STDALL).Return(io.NopCloser(strings.NewReader("")), io.EOF). mockedClient.On("ContainerLogs", mock.Anything, id, started, docker.STDALL).Return(io.NopCloser(strings.NewReader("")), io.EOF).
Run(func(args mock.Arguments) { Run(func(args mock.Arguments) {
go func() { go func() {
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
cancel() cancel()
}() }()
}) })
mockedClient.On("Host").Return(docker.Host{
ID: "localhost",
})
mockedClient.On("ListContainers").Return([]docker.Container{
{ID: id, Name: "test", Host: "localhost"},
}, nil)
mockedClient.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil)
handler := createDefaultHandler(mockedClient) handler := createDefaultHandler(mockedClient)
rr := httptest.NewRecorder() rr := httptest.NewRecorder()
@@ -116,32 +143,37 @@ func Test_handler_streamLogs_happy_container_stopped(t *testing.T) {
mockedClient.AssertExpectations(t) mockedClient.AssertExpectations(t)
} }
func Test_handler_streamLogs_error_finding_container(t *testing.T) { // func Test_handler_streamLogs_error_finding_container(t *testing.T) {
id := "123456" // id := "123456"
ctx, cancel := context.WithCancel(context.Background()) // ctx, cancel := context.WithCancel(context.Background())
req, err := http.NewRequestWithContext(ctx, "GET", "/api/hosts/localhost/containers/"+id+"/logs/stream", nil) // req, err := http.NewRequestWithContext(ctx, "GET", "/api/hosts/localhost/containers/"+id+"/logs/stream", nil)
q := req.URL.Query() // q := req.URL.Query()
q.Add("stdout", "true") // q.Add("stdout", "true")
q.Add("stderr", "true") // q.Add("stderr", "true")
req.URL.RawQuery = q.Encode() // req.URL.RawQuery = q.Encode()
require.NoError(t, err, "NewRequest should not return an error.") // require.NoError(t, err, "NewRequest should not return an error.")
mockedClient := new(MockedClient) // mockedClient := new(MockedClient)
mockedClient.On("FindContainer", id).Return(docker.Container{}, errors.New("error finding container")). // mockedClient.On("FindContainer", id).Return(docker.Container{}, errors.New("error finding container")).
Run(func(args mock.Arguments) { // Run(func(args mock.Arguments) {
go func() { // go func() {
time.Sleep(50 * time.Millisecond) // time.Sleep(50 * time.Millisecond)
cancel() // cancel()
}() // }()
}) // })
// mockedClient.On("Host").Return(docker.Host{
// ID: "localhost",
// })
// mockedClient.On("ListContainers").Return([]docker.Container{}, nil)
// mockedClient.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil)
handler := createDefaultHandler(mockedClient) // handler := createDefaultHandler(mockedClient)
rr := httptest.NewRecorder() // rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req) // handler.ServeHTTP(rr, req)
abide.AssertHTTPResponse(t, t.Name(), rr.Result()) // abide.AssertHTTPResponse(t, t.Name(), rr.Result())
mockedClient.AssertExpectations(t) // mockedClient.AssertExpectations(t)
} // }
func Test_handler_streamLogs_error_reading(t *testing.T) { func Test_handler_streamLogs_error_reading(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@@ -158,13 +190,20 @@ func Test_handler_streamLogs_error_reading(t *testing.T) {
started := time.Date(2020, time.May, 13, 18, 55, 37, 772853839, time.UTC) started := time.Date(2020, time.May, 13, 18, 55, 37, 772853839, time.UTC)
mockedClient := new(MockedClient) mockedClient := new(MockedClient)
mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Host: "localhost", StartedAt: &started}, nil) mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Host: "localhost", StartedAt: &started}, nil)
mockedClient.On("ContainerLogs", mock.Anything, id, &started, docker.STDALL).Return(io.NopCloser(strings.NewReader("")), errors.New("test error")). mockedClient.On("ContainerLogs", mock.Anything, id, started, docker.STDALL).Return(io.NopCloser(strings.NewReader("")), errors.New("test error")).
Run(func(args mock.Arguments) { Run(func(args mock.Arguments) {
go func() { go func() {
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
cancel() cancel()
}() }()
}) })
mockedClient.On("Host").Return(docker.Host{
ID: "localhost",
})
mockedClient.On("ListContainers").Return([]docker.Container{
{ID: id, Name: "test", Host: "localhost"},
}, nil)
mockedClient.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil)
handler := createDefaultHandler(mockedClient) handler := createDefaultHandler(mockedClient)
rr := httptest.NewRecorder() rr := httptest.NewRecorder()
@@ -181,12 +220,21 @@ func Test_handler_streamLogs_error_std(t *testing.T) {
mockedClient := new(MockedClient) mockedClient := new(MockedClient)
mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Host: "localhost"}, nil) mockedClient.On("FindContainer", id).Return(docker.Container{ID: id, Host: "localhost"}, nil)
mockedClient.On("Host").Return(docker.Host{
ID: "localhost",
})
mockedClient.On("ListContainers").Return([]docker.Container{
{ID: id, Name: "test", Host: "localhost"},
}, nil)
mockedClient.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil).
Run(func(args mock.Arguments) {
time.Sleep(50 * time.Millisecond)
})
handler := createDefaultHandler(mockedClient) handler := createDefaultHandler(mockedClient)
rr := httptest.NewRecorder() rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req) handler.ServeHTTP(rr, req)
abide.AssertHTTPResponse(t, t.Name(), rr.Result()) abide.AssertHTTPResponse(t, t.Name(), rr.Result())
mockedClient.AssertExpectations(t)
} }
func Test_handler_between_dates(t *testing.T) { func Test_handler_between_dates(t *testing.T) {
@@ -213,6 +261,13 @@ func Test_handler_between_dates(t *testing.T) {
mockedClient.On("ContainerLogsBetweenDates", mock.Anything, id, from, to, docker.STDALL).Return(io.NopCloser(bytes.NewReader(data)), nil) mockedClient.On("ContainerLogsBetweenDates", mock.Anything, id, from, to, docker.STDALL).Return(io.NopCloser(bytes.NewReader(data)), nil)
mockedClient.On("FindContainer", id).Return(docker.Container{ID: id}, nil) mockedClient.On("FindContainer", id).Return(docker.Container{ID: id}, nil)
mockedClient.On("Host").Return(docker.Host{
ID: "localhost",
})
mockedClient.On("ListContainers").Return([]docker.Container{
{ID: id, Name: "test", Host: "localhost"},
}, nil)
mockedClient.On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil)
handler := createDefaultHandler(mockedClient) handler := createDefaultHandler(mockedClient)
rr := httptest.NewRecorder() rr := httptest.NewRecorder()

View File

@@ -1,14 +1,13 @@
package web package web
import ( import (
"context"
"io/fs" "io/fs"
"net/http" "net/http"
"strings" "strings"
"github.com/amir20/dozzle/internal/auth" "github.com/amir20/dozzle/internal/auth"
"github.com/amir20/dozzle/internal/docker" docker_support "github.com/amir20/dozzle/internal/support/docker"
"github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@@ -45,23 +44,19 @@ type Authorizer interface {
} }
type handler struct { type handler struct {
clients map[string]docker.Client
stores map[string]*docker.ContainerStore
content fs.FS content fs.FS
config *Config config *Config
multiHostService *docker_support.MultiHostService
} }
func CreateServer(clients map[string]docker.Client, content fs.FS, config Config) *http.Server { type MultiHostService = docker_support.MultiHostService
stores := make(map[string]*docker.ContainerStore) type ContainerFilter = docker_support.ContainerFilter
for host, client := range clients {
stores[host] = docker.NewContainerStore(context.Background(), client)
}
func CreateServer(multiHostService *MultiHostService, content fs.FS, config Config) *http.Server {
handler := &handler{ handler := &handler{
clients: clients,
content: content, content: content,
config: &config, config: &config,
stores: stores, multiHostService: multiHostService,
} }
return &http.Server{Addr: config.Addr, Handler: createRouter(handler)} return &http.Server{Addr: config.Addr, Handler: createRouter(handler)}
@@ -134,17 +129,12 @@ func createRouter(h *handler) *chi.Mux {
return r return r
} }
func (h *handler) clientFromRequest(r *http.Request) docker.Client { func hostKey(r *http.Request) string {
host := chi.URLParam(r, "host") host := chi.URLParam(r, "host")
if host == "" { if host == "" {
log.Fatalf("No host found for url %v", r.URL) log.Fatalf("No host found for url %v", r.URL)
} }
if client, ok := h.clients[host]; ok { return host
return client
}
log.Fatalf("No client found for host %v and url %v", host, r.URL)
return nil
} }

View File

@@ -8,6 +8,7 @@ import (
"io/fs" "io/fs"
"github.com/amir20/dozzle/internal/docker" "github.com/amir20/dozzle/internal/docker"
docker_support "github.com/amir20/dozzle/internal/support/docker"
"github.com/docker/docker/api/types/system" "github.com/docker/docker/api/types/system"
"github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5"
@@ -26,26 +27,26 @@ func (m *MockedClient) FindContainer(id string) (docker.Container, error) {
return args.Get(0).(docker.Container), args.Error(1) return args.Get(0).(docker.Container), args.Error(1)
} }
func (m *MockedClient) ContainerActions(action string, containerID string) error { func (m *MockedClient) ContainerActions(action docker.ContainerAction, containerID string) error {
args := m.Called(action, containerID) args := m.Called(action, containerID)
return args.Error(0) return args.Error(0)
} }
func (m *MockedClient) ContainerEvents(ctx context.Context, events chan<- docker.ContainerEvent) error {
args := m.Called(ctx, events)
return args.Error(0)
}
func (m *MockedClient) ListContainers() ([]docker.Container, error) { func (m *MockedClient) ListContainers() ([]docker.Container, error) {
args := m.Called() args := m.Called()
return args.Get(0).([]docker.Container), args.Error(1) return args.Get(0).([]docker.Container), args.Error(1)
} }
func (m *MockedClient) ContainerLogs(ctx context.Context, id string, since *time.Time, stdType docker.StdType) (io.ReadCloser, error) { func (m *MockedClient) ContainerLogs(ctx context.Context, id string, since time.Time, stdType docker.StdType) (io.ReadCloser, error) {
args := m.Called(ctx, id, since, stdType) args := m.Called(ctx, id, since, stdType)
return args.Get(0).(io.ReadCloser), args.Error(1) return args.Get(0).(io.ReadCloser), args.Error(1)
} }
func (m *MockedClient) Events(ctx context.Context, events chan<- docker.ContainerEvent) error {
args := m.Called(ctx, events)
return args.Error(0)
}
func (m *MockedClient) ContainerStats(context.Context, string, chan<- docker.ContainerStat) error { func (m *MockedClient) ContainerStats(context.Context, string, chan<- docker.ContainerStat) error {
return nil return nil
} }
@@ -55,9 +56,9 @@ func (m *MockedClient) ContainerLogsBetweenDates(ctx context.Context, id string,
return args.Get(0).(io.ReadCloser), args.Error(1) return args.Get(0).(io.ReadCloser), args.Error(1)
} }
func (m *MockedClient) Host() *docker.Host { func (m *MockedClient) Host() docker.Host {
args := m.Called() args := m.Called()
return args.Get(0).(*docker.Host) return args.Get(0).(docker.Host)
} }
func (m *MockedClient) IsSwarmMode() bool { func (m *MockedClient) IsSwarmMode() bool {
@@ -72,9 +73,10 @@ func createHandler(client docker.Client, content fs.FS, config Config) *chi.Mux
if client == nil { if client == nil {
client = new(MockedClient) client = new(MockedClient)
client.(*MockedClient).On("ListContainers").Return([]docker.Container{}, nil) client.(*MockedClient).On("ListContainers").Return([]docker.Container{}, nil)
client.(*MockedClient).On("Host").Return(&docker.Host{ client.(*MockedClient).On("Host").Return(docker.Host{
ID: "localhost", ID: "localhost",
}) })
client.(*MockedClient).On("ContainerEvents", mock.Anything, mock.AnythingOfType("chan<- docker.ContainerEvent")).Return(nil)
} }
if content == nil { if content == nil {
@@ -83,11 +85,9 @@ func createHandler(client docker.Client, content fs.FS, config Config) *chi.Mux
content = afero.NewIOFS(fs) content = afero.NewIOFS(fs)
} }
clients := map[string]docker.Client{ multiHostService := docker_support.NewMultiHostService([]docker_support.ClientService{docker_support.NewDockerClientService(client)})
"localhost": client,
}
return createRouter(&handler{ return createRouter(&handler{
clients: clients, multiHostService: multiHostService,
content: content, content: content,
config: &config, config: &config,
}) })

252
main.go
View File

@@ -3,22 +3,25 @@ package main
import ( import (
"context" "context"
"embed" "embed"
"errors" "io"
"io/fs" "io/fs"
"net"
"net/http" "net/http"
"os" "os"
"os/signal" "os/signal"
"path/filepath" "path/filepath"
"reflect"
"strings" "strings"
"syscall" "syscall"
"time" "time"
"github.com/alexflint/go-arg" "github.com/alexflint/go-arg"
"github.com/amir20/dozzle/internal/analytics" "github.com/amir20/dozzle/internal/agent"
"github.com/amir20/dozzle/internal/auth" "github.com/amir20/dozzle/internal/auth"
"github.com/amir20/dozzle/internal/docker" "github.com/amir20/dozzle/internal/docker"
"github.com/amir20/dozzle/internal/healthcheck" "github.com/amir20/dozzle/internal/healthcheck"
"github.com/amir20/dozzle/internal/support/cli"
docker_support "github.com/amir20/dozzle/internal/support/docker"
"github.com/amir20/dozzle/internal/web" "github.com/amir20/dozzle/internal/web"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@@ -37,20 +40,25 @@ type args struct {
AuthHeaderUser string `arg:"--auth-header-user,env:DOZZLE_AUTH_HEADER_USER" default:"Remote-User" help:"sets the HTTP Header to use for username in Forward Proxy configuration."` AuthHeaderUser string `arg:"--auth-header-user,env:DOZZLE_AUTH_HEADER_USER" default:"Remote-User" help:"sets the HTTP Header to use for username in Forward Proxy configuration."`
AuthHeaderEmail string `arg:"--auth-header-email,env:DOZZLE_AUTH_HEADER_EMAIL" default:"Remote-Email" help:"sets the HTTP Header to use for email in Forward Proxy configuration."` AuthHeaderEmail string `arg:"--auth-header-email,env:DOZZLE_AUTH_HEADER_EMAIL" default:"Remote-Email" help:"sets the HTTP Header to use for email in Forward Proxy configuration."`
AuthHeaderName string `arg:"--auth-header-name,env:DOZZLE_AUTH_HEADER_NAME" default:"Remote-Name" help:"sets the HTTP Header to use for name in Forward Proxy configuration."` AuthHeaderName string `arg:"--auth-header-name,env:DOZZLE_AUTH_HEADER_NAME" default:"Remote-Name" help:"sets the HTTP Header to use for name in Forward Proxy configuration."`
WaitForDockerSeconds int `arg:"--wait-for-docker-seconds,env:DOZZLE_WAIT_FOR_DOCKER_SECONDS" help:"wait for docker to be available for at most this many seconds before starting the server."`
EnableActions bool `arg:"--enable-actions,env:DOZZLE_ENABLE_ACTIONS" default:"false" help:"enables essential actions on containers from the web interface."` EnableActions bool `arg:"--enable-actions,env:DOZZLE_ENABLE_ACTIONS" default:"false" help:"enables essential actions on containers from the web interface."`
FilterStrings []string `arg:"env:DOZZLE_FILTER,--filter,separate" help:"filters docker containers using Docker syntax."` FilterStrings []string `arg:"env:DOZZLE_FILTER,--filter,separate" help:"filters docker containers using Docker syntax."`
Filter map[string][]string `arg:"-"` Filter map[string][]string `arg:"-"`
RemoteHost []string `arg:"env:DOZZLE_REMOTE_HOST,--remote-host,separate" help:"list of hosts to connect remotely"` RemoteHost []string `arg:"env:DOZZLE_REMOTE_HOST,--remote-host,separate" help:"list of hosts to connect remotely"`
RemoteAgent []string `arg:"env:DOZZLE_REMOTE_AGENT,--remote-agent,separate" help:"list of agents to connect remotely"`
NoAnalytics bool `arg:"--no-analytics,env:DOZZLE_NO_ANALYTICS" help:"disables anonymous analytics"` NoAnalytics bool `arg:"--no-analytics,env:DOZZLE_NO_ANALYTICS" help:"disables anonymous analytics"`
Mode string `arg:"env:DOZZLE_MODE" default:"server" help:"sets the mode to run in (server, swarm)"`
Healthcheck *HealthcheckCmd `arg:"subcommand:healthcheck" help:"checks if the server is running"` Healthcheck *HealthcheckCmd `arg:"subcommand:healthcheck" help:"checks if the server is running"`
Generate *GenerateCmd `arg:"subcommand:generate" help:"generates a configuration file for simple auth"` Generate *GenerateCmd `arg:"subcommand:generate" help:"generates a configuration file for simple auth"`
Agent *AgentCmd `arg:"subcommand:agent" help:"starts the agent"`
} }
type HealthcheckCmd struct { type HealthcheckCmd struct {
} }
type AgentCmd struct {
Addr string `arg:"env:DOZZLE_AGENT_ADDR" default:":7007" help:"sets the host:port to bind for the agent"`
}
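For illustration only (option names taken from the struct fields above; host names are hypothetical): a node could expose its local Docker engine by running the new `agent` subcommand, overriding the bind address with `DOZZLE_AGENT_ADDR=:7007` if needed, while the main Dozzle instance reaches it via `DOZZLE_REMOTE_AGENT=worker-1:7007`; the flag is a separate-style list, so several agents can be supplied.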
type GenerateCmd struct { type GenerateCmd struct {
Username string `arg:"positional"` Username string `arg:"positional"`
Password string `arg:"--password, -p" help:"sets the password for the user"` Password string `arg:"--password, -p" help:"sets the password for the user"`
@@ -65,14 +73,65 @@ func (args) Version() string {
//go:embed all:dist //go:embed all:dist
var content embed.FS var content embed.FS
//go:embed shared_cert.pem shared_key.pem
var certs embed.FS
//go:generate protoc --go_out=. --go-grpc_out=. --proto_path=./protos ./protos/rpc.proto ./protos/types.proto
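Note that the binary now embeds shared_cert.pem and shared_key.pem from the repository root, so both files must exist before a local `go build`. A self-signed pair is enough for experimentation (an assumption, not part of this change), e.g. `openssl req -x509 -newkey rsa:4096 -nodes -subj "/CN=dozzle" -keyout shared_key.pem -out shared_cert.pem -days 3650`. The go:generate directive likewise assumes protoc with the Go and Go-gRPC plugins is available on the PATH.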
func main() { func main() {
cli.ValidateEnvVars(args{}, AgentCmd{})
args, subcommand := parseArgs() args, subcommand := parseArgs()
validateEnvVars()
if subcommand != nil { if subcommand != nil {
switch subcommand.(type) { switch subcommand.(type) {
case *AgentCmd:
client, err := docker.NewLocalClient(args.Filter, args.Hostname)
if err != nil {
log.Fatalf("Could not create docker client: %v", err)
}
certs, err := cli.ReadCertificates(certs)
if err != nil {
log.Fatalf("Could not read certificates: %v", err)
}
listener, err := net.Listen("tcp", args.Agent.Addr)
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
tempFile, err := os.CreateTemp("/", "agent-*.addr")
if err != nil {
log.Fatalf("failed to create temp file: %v", err)
}
defer os.Remove(tempFile.Name())
io.WriteString(tempFile, listener.Addr().String())
agent.RunServer(client, certs, listener)
case *HealthcheckCmd: case *HealthcheckCmd:
files, err := os.ReadDir(".")
if err != nil {
log.Fatalf("Failed to read directory: %v", err)
}
agentAddress := ""
for _, file := range files {
if match, _ := filepath.Match("agent-*.addr", file.Name()); match {
data, err := os.ReadFile(file.Name())
if err != nil {
log.Fatalf("Failed to read file: %v", err)
}
agentAddress = string(data)
break
}
}
if agentAddress == "" {
if err := healthcheck.HttpRequest(args.Addr, args.Base); err != nil { if err := healthcheck.HttpRequest(args.Addr, args.Base); err != nil {
log.Fatal(err) log.Fatalf("Failed to make request: %v", err)
}
} else {
certs, err := cli.ReadCertificates(certs)
if err != nil {
log.Fatalf("Could not read certificates: %v", err)
}
if err := healthcheck.RPCRequest(agentAddress, certs); err != nil {
log.Fatalf("Failed to make request: %v", err)
}
} }
case *GenerateCmd: case *GenerateCmd:
@@ -88,7 +147,7 @@ func main() {
}, true) }, true)
if _, err := os.Stdout.Write(buffer.Bytes()); err != nil { if _, err := os.Stdout.Write(buffer.Bytes()); err != nil {
log.Fatal(err) log.Fatalf("Failed to write to stdout: %v", err)
} }
} }
@@ -101,16 +160,35 @@ func main() {
log.Infof("Dozzle version %s", version) log.Infof("Dozzle version %s", version)
clients := createClients(args, docker.NewClientWithFilters, docker.NewClientWithTlsAndFilter, args.Hostname) var multiHostService *docker_support.MultiHostService
if args.Mode == "server" {
if len(clients) == 0 { multiHostService = createMultiHostService(args)
if multiHostService.TotalClients() == 0 {
log.Fatal("Could not connect to any Docker Engines") log.Fatal("Could not connect to any Docker Engines")
} else { } else {
log.Infof("Connected to %d Docker Engine(s)", len(clients)) log.Infof("Connected to %d Docker Engine(s)", multiHostService.TotalClients())
}
} else if args.Mode == "swarm" {
localClient, err := docker.NewLocalClient(args.Filter, args.Hostname)
if err != nil {
log.Fatalf("Could not connect to local Docker Engine: %s", err)
}
certs, err := cli.ReadCertificates(certs)
if err != nil {
log.Fatalf("Could not read certificates: %v", err)
}
multiHostService = docker_support.NewSwarmService(localClient, certs)
log.Infof("Starting in Swarm mode")
listener, err := net.Listen("tcp", ":7007")
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
go agent.RunServer(localClient, certs, listener)
} else {
log.Fatalf("Invalid mode %s", args.Mode)
} }
srv := createServer(args, clients) srv := createServer(args, multiHostService)
go doStartEvent(args, clients)
go func() { go func() {
log.Infof("Accepting connections on %s", srv.Addr) log.Infof("Accepting connections on %s", srv.Addr)
if err := srv.ListenAndServe(); err != http.ErrServerClosed { if err := srv.ListenAndServe(); err != http.ErrServerClosed {
@@ -131,57 +209,19 @@ func main() {
log.Debug("shutdown complete") log.Debug("shutdown complete")
} }
func doStartEvent(arg args, clients map[string]docker.Client) { func createMultiHostService(args args) *docker_support.MultiHostService {
if arg.NoAnalytics { var clients []docker_support.ClientService
log.Debug("Analytics disabled.")
return
}
event := analytics.BeaconEvent{
Name: "start",
Version: version,
}
if client, ok := clients["localhost"]; ok {
event.ServerID = client.SystemInfo().ID
event.ServerVersion = client.SystemInfo().ServerVersion
} else {
for _, client := range clients {
event.ServerID = client.SystemInfo().ID
event.ServerVersion = client.SystemInfo().ServerVersion
break
}
}
if err := analytics.SendBeacon(event); err != nil {
log.Debug(err)
}
}
func createClients(args args,
localClientFactory func(map[string][]string) (docker.Client, error),
remoteClientFactory func(map[string][]string, docker.Host) (docker.Client, error),
hostname string) map[string]docker.Client {
clients := make(map[string]docker.Client)
if localClient, err := createLocalClient(args, localClientFactory); err == nil {
if hostname != "" {
localClient.Host().Name = hostname
}
clients[localClient.Host().ID] = localClient
}
for _, remoteHost := range args.RemoteHost { for _, remoteHost := range args.RemoteHost {
host, err := docker.ParseConnection(remoteHost) host, err := docker.ParseConnection(remoteHost)
if err != nil { if err != nil {
log.Fatalf("Could not parse remote host %s: %s", remoteHost, err) log.Fatalf("Could not parse remote host %s: %s", remoteHost, err)
} }
log.Debugf("Creating remote client for %s with %+v", host.Name, host) log.Debugf("creating remote client for %s with %+v", host.Name, host)
log.Infof("Creating client for %s with %s", host.Name, host.URL.String()) log.Infof("Creating client for %s with %s", host.Name, host.URL.String())
if client, err := remoteClientFactory(args.Filter, host); err == nil { if client, err := docker.NewRemoteClient(args.Filter, host); err == nil {
if _, err := client.ListContainers(); err == nil { if _, err := client.ListContainers(); err == nil {
log.Debugf("Connected to local Docker Engine") log.Debugf("connected to local Docker Engine")
clients[client.Host().ID] = client clients = append(clients, docker_support.NewDockerClientService(client))
} else { } else {
log.Warnf("Could not connect to remote host %s: %s", host.ID, err) log.Warnf("Could not connect to remote host %s: %s", host.ID, err)
} }
@@ -189,11 +229,40 @@ func createClients(args args,
log.Warnf("Could not create client for %s: %s", host.ID, err) log.Warnf("Could not create client for %s: %s", host.ID, err)
} }
} }
certs, err := cli.ReadCertificates(certs)
return clients if err != nil {
log.Fatalf("Could not read certificates: %v", err)
}
for _, remoteAgent := range args.RemoteAgent {
client, err := agent.NewClient(remoteAgent, certs)
if err != nil {
log.Warnf("Could not connect to remote agent %s: %s", remoteAgent, err)
continue
}
clients = append(clients, docker_support.NewAgentService(client))
} }
func createServer(args args, clients map[string]docker.Client) *http.Server { localClient, err := docker.NewLocalClient(args.Filter, args.Hostname)
if err == nil {
_, err := localClient.ListContainers()
if err != nil {
log.Debugf("could not connect to local Docker Engine: %s", err)
if !args.NoAnalytics {
go cli.StartEvent(version, args.Mode, args.RemoteAgent, args.RemoteHost, nil)
}
} else {
log.Debugf("connected to local Docker Engine")
if !args.NoAnalytics {
go cli.StartEvent(version, args.Mode, args.RemoteAgent, args.RemoteHost, localClient)
}
clients = append(clients, docker_support.NewDockerClientService(localClient))
}
}
return docker_support.NewMultiHostService(clients)
}
func createServer(args args, multiHostService *docker_support.MultiHostService) *http.Server {
_, dev := os.LookupEnv("DEV") _, dev := os.LookupEnv("DEV")
var provider web.AuthProvider = web.NONE var provider web.AuthProvider = web.NONE
@@ -257,38 +326,14 @@ func createServer(args args, clients map[string]docker.Client) *http.Server {
} }
} }
return web.CreateServer(clients, assets, config) return web.CreateServer(multiHostService, assets, config)
}
func createLocalClient(args args, localClientFactory func(map[string][]string) (docker.Client, error)) (docker.Client, error) {
for i := 1; ; i++ {
dockerClient, err := localClientFactory(args.Filter)
if err == nil {
_, err := dockerClient.ListContainers()
if err != nil {
log.Debugf("Could not connect to local Docker Engine: %s", err)
} else {
log.Debugf("Connected to local Docker Engine")
return dockerClient, nil
}
}
if args.WaitForDockerSeconds > 0 {
log.Infof("Waiting for Docker Engine (attempt %d): %s", i, err)
time.Sleep(5 * time.Second)
args.WaitForDockerSeconds -= 5
} else {
log.Debugf("Local Docker Engine not found")
break
}
}
return nil, errors.New("could not connect to local Docker Engine")
} }
func parseArgs() (args, interface{}) { func parseArgs() (args, interface{}) {
var args args var args args
parser := arg.MustParse(&args) parser := arg.MustParse(&args)
configureLogger(args.Level) cli.ConfigureLogger(args.Level)
args.Filter = make(map[string][]string) args.Filter = make(map[string][]string)
@@ -304,36 +349,3 @@ func parseArgs() (args, interface{}) {
return args, parser.Subcommand() return args, parser.Subcommand()
} }
func configureLogger(level string) {
if l, err := log.ParseLevel(level); err == nil {
log.SetLevel(l)
} else {
panic(err)
}
log.SetFormatter(&log.TextFormatter{
DisableLevelTruncation: true,
})
}
func validateEnvVars() {
argsType := reflect.TypeOf(args{})
expectedEnvs := make(map[string]bool)
for i := 0; i < argsType.NumField(); i++ {
field := argsType.Field(i)
for _, tag := range strings.Split(field.Tag.Get("arg"), ",") {
if strings.HasPrefix(tag, "env:") {
expectedEnvs[strings.TrimPrefix(tag, "env:")] = true
}
}
}
for _, env := range os.Environ() {
actual := strings.Split(env, "=")[0]
if strings.HasPrefix(actual, "DOZZLE_") && !expectedEnvs[actual] {
log.Warnf("Unexpected environment variable %s", actual)
}
}
}

View File

@@ -1,146 +0,0 @@
package main
import (
"context"
"errors"
"testing"
"github.com/amir20/dozzle/internal/docker"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/system"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
type fakeCLI struct {
docker.DockerCLI
mock.Mock
}
func (f *fakeCLI) ContainerList(context.Context, container.ListOptions) ([]types.Container, error) {
args := f.Called()
return args.Get(0).([]types.Container), args.Error(1)
}
func (f *fakeCLI) Info(context.Context) (system.Info, error) {
return system.Info{}, nil
}
func Test_valid_localhost(t *testing.T) {
client := new(fakeCLI)
client.On("ContainerList").Return([]types.Container{}, nil)
fakeClientFactory := func(filter map[string][]string) (docker.Client, error) {
return docker.NewClient(client, filters.NewArgs(), &docker.Host{
ID: "localhost",
}), nil
}
args := args{}
actualClient, _ := createLocalClient(args, fakeClientFactory)
assert.NotNil(t, actualClient)
client.AssertExpectations(t)
}
func Test_invalid_localhost(t *testing.T) {
client := new(fakeCLI)
client.On("ContainerList").Return([]types.Container{}, errors.New("error"))
fakeClientFactory := func(filter map[string][]string) (docker.Client, error) {
return docker.NewClient(client, filters.NewArgs(), &docker.Host{
ID: "localhost",
}), nil
}
args := args{}
actualClient, _ := createLocalClient(args, fakeClientFactory)
assert.Nil(t, actualClient)
client.AssertExpectations(t)
}
func Test_valid_remote(t *testing.T) {
local := new(fakeCLI)
local.On("ContainerList").Return([]types.Container{}, errors.New("error"))
fakeLocalClientFactory := func(filter map[string][]string) (docker.Client, error) {
return docker.NewClient(local, filters.NewArgs(), &docker.Host{
ID: "localhost",
}), nil
}
remote := new(fakeCLI)
remote.On("ContainerList").Return([]types.Container{}, nil)
fakeRemoteClientFactory := func(filter map[string][]string, host docker.Host) (docker.Client, error) {
return docker.NewClient(remote, filters.NewArgs(), &docker.Host{
ID: "test",
}), nil
}
args := args{
RemoteHost: []string{"tcp://test:2375"},
}
clients := createClients(args, fakeLocalClientFactory, fakeRemoteClientFactory, "")
assert.Equal(t, 1, len(clients))
assert.Contains(t, clients, "test")
assert.NotContains(t, clients, "localhost")
local.AssertExpectations(t)
remote.AssertExpectations(t)
}
func Test_valid_remote_and_local(t *testing.T) {
local := new(fakeCLI)
local.On("ContainerList").Return([]types.Container{}, nil)
fakeLocalClientFactory := func(filter map[string][]string) (docker.Client, error) {
return docker.NewClient(local, filters.NewArgs(), &docker.Host{
ID: "localhost",
}), nil
}
remote := new(fakeCLI)
remote.On("ContainerList").Return([]types.Container{}, nil)
fakeRemoteClientFactory := func(filter map[string][]string, host docker.Host) (docker.Client, error) {
return docker.NewClient(remote, filters.NewArgs(), &docker.Host{
ID: "test",
}), nil
}
args := args{
RemoteHost: []string{"tcp://test:2375"},
}
clients := createClients(args, fakeLocalClientFactory, fakeRemoteClientFactory, "")
assert.Equal(t, 2, len(clients))
assert.Contains(t, clients, "test")
assert.Contains(t, clients, "localhost")
local.AssertExpectations(t)
remote.AssertExpectations(t)
}
func Test_no_clients(t *testing.T) {
local := new(fakeCLI)
local.On("ContainerList").Return([]types.Container{}, errors.New("error"))
fakeLocalClientFactory := func(filter map[string][]string) (docker.Client, error) {
return docker.NewClient(local, filters.NewArgs(), &docker.Host{
ID: "localhost",
}), nil
}
fakeRemoteClientFactory := func(filter map[string][]string, host docker.Host) (docker.Client, error) {
client := new(fakeCLI)
return docker.NewClient(client, filters.NewArgs(), &docker.Host{
ID: "test",
}), nil
}
args := args{}
clients := createClients(args, fakeLocalClientFactory, fakeRemoteClientFactory, "")
assert.Equal(t, 0, len(clients))
local.AssertExpectations(t)
}

View File

@@ -15,8 +15,9 @@
"license": "ISC", "license": "ISC",
"author": "Amir Raminfar <findamir@gmail.com>", "author": "Amir Raminfar <findamir@gmail.com>",
"scripts": { "scripts": {
"agent:dev": "DOZZLE_AGENT_ADDR=localhost:7007 reflex -c .reflex.agent",
"watch:frontend": "vite --open http://localhost:3100/", "watch:frontend": "vite --open http://localhost:3100/",
"watch:backend": "LIVE_FS=true DEV=true DOZZLE_ADDR=localhost:3100 reflex -c .reflex", "watch:backend": "LIVE_FS=true DEV=true DOZZLE_ADDR=localhost:3100 reflex -c .reflex.server",
"dev": "concurrently --kill-others \"npm:watch:*\"", "dev": "concurrently --kill-others \"npm:watch:*\"",
"build": "vite build", "build": "vite build",
"preview": "LIVE_FS=true DOZZLE_ADDR=localhost:3100 reflex -c .reflex", "preview": "LIVE_FS=true DOZZLE_ADDR=localhost:3100 reflex -c .reflex",

66
protos/rpc.proto Normal file
View File

@@ -0,0 +1,66 @@
syntax = "proto3";
option go_package = "internal/agent/pb";
package protobuf;
import "types.proto";
import "google/protobuf/timestamp.proto";
service AgentService {
rpc ListContainers(ListContainersRequest) returns (ListContainersResponse) {}
rpc FindContainer(FindContainerRequest) returns (FindContainerResponse) {}
rpc StreamLogs(StreamLogsRequest) returns (stream StreamLogsResponse) {}
rpc LogsBetweenDates(LogsBetweenDatesRequest)
returns (stream StreamLogsResponse) {}
rpc StreamRawBytes(StreamRawBytesRequest)
returns (stream StreamRawBytesResponse) {}
rpc StreamEvents(StreamEventsRequest) returns (stream StreamEventsResponse) {}
rpc StreamStats(StreamStatsRequest) returns (stream StreamStatsResponse) {}
rpc StreamContainerStarted(StreamContainerStartedRequest)
returns (stream StreamContainerStartedResponse) {}
rpc HostInfo(HostInfoRequest) returns (HostInfoResponse) {}
}
message ListContainersRequest {}
message ListContainersResponse { repeated Container containers = 1; }
message FindContainerRequest { string containerId = 1; }
message FindContainerResponse { Container container = 1; }
message StreamLogsRequest {
string containerId = 1;
google.protobuf.Timestamp since = 2;
int32 streamTypes = 3;
}
message StreamLogsResponse { LogEvent event = 1; }
message LogsBetweenDatesRequest {
string containerId = 1;
google.protobuf.Timestamp since = 2;
google.protobuf.Timestamp until = 3;
int32 streamTypes = 4;
}
message StreamRawBytesRequest {
string containerId = 1;
google.protobuf.Timestamp since = 2;
google.protobuf.Timestamp until = 3;
int32 streamTypes = 4;
}
message StreamRawBytesResponse { bytes data = 1; }
message StreamEventsRequest {}
message StreamEventsResponse { ContainerEvent event = 1; }
message StreamStatsRequest {}
message StreamStatsResponse { ContainerStat stat = 1; }
message HostInfoRequest {}
message HostInfoResponse { Host host = 1; }
message StreamContainerStartedRequest {}
message StreamContainerStartedResponse { Container container = 1; }
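As a rough sketch of how these RPCs might be consumed (assuming the stubs are generated into internal/agent/pb as declared by go_package above; the import path, target address, and use of insecure credentials are illustrative assumptions, since the real agent speaks TLS with the embedded certificates):

package main

import (
	"context"
	"fmt"
	"log"

	pb "github.com/amir20/dozzle/internal/agent/pb" // assumed import path for the generated package
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial a hypothetical agent; real deployments would supply TLS credentials instead.
	conn, err := grpc.Dial("agent-host:7007", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("could not dial agent: %v", err)
	}
	defer conn.Close()

	client := pb.NewAgentServiceClient(conn)
	resp, err := client.ListContainers(context.Background(), &pb.ListContainersRequest{})
	if err != nil {
		log.Fatalf("ListContainers failed: %v", err)
	}
	for _, c := range resp.GetContainers() {
		fmt.Println(c.GetId(), c.GetName(), c.GetState())
	}
}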

65
protos/types.proto Normal file
View File

@@ -0,0 +1,65 @@
syntax = "proto3";
option go_package = "internal/agent/pb";
package protobuf;
import "google/protobuf/timestamp.proto";
import "google/protobuf/any.proto";
message Container {
string id = 1;
string name = 2;
string image = 3;
string status = 4;
string state = 5;
string ImageId = 6;
google.protobuf.Timestamp created = 7;
google.protobuf.Timestamp started = 8;
string health = 9;
string host = 10;
bool tty = 11;
map<string, string> labels = 12;
repeated ContainerStat stats = 13;
string group = 14;
string command = 15;
}
message ContainerStat {
string id = 1;
double cpuPercent = 2;
double memoryUsage = 3;
double memoryPercent = 4;
}
message LogEvent {
uint32 id = 1;
string containerId = 2;
google.protobuf.Any message = 3;
google.protobuf.Timestamp timestamp = 4;
string level = 5;
string stream = 6;
string position = 7;
}
message SimpleMessage { string message = 1; }
message ComplexMessage { bytes data = 1; }
message ContainerEvent {
string actorId = 1;
string name = 2;
string host = 3;
}
message Host {
string id = 1;
string name = 2;
string nodeAddress = 3;
bool swarm = 4;
map<string, string> labels = 5;
string operatingSystem = 6;
string osVersion = 7;
string osType = 8;
uint32 cpuCores = 9;
uint32 memory = 10;
}